code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square row_size x row_size matrix filled with 1..row_size**2.

    A non-positive or zero size falls back to 4; negative sizes use their
    absolute value.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise (90 clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return the transpose of the matrix (rows become columns)."""
    matrix = [list(column) for column in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with its rows in reverse order."""
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with each row reversed (columns mirrored)."""
    matrix = [row[::-1] for row in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    """Print the matrix one row per line, values space-separated."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 9 |
"""simple docstring"""
def molarity_to_normality(nfactor: float, moles: float, volume: float) -> int:
    """Convert molarity to normality, rounded to the nearest integer.

    Normality = molarity * n-factor, where molarity = moles / volume (litres).
    """
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> int:
    """Ideal-gas pressure (atm, rounded) from volume (L), moles and temperature (K).

    Uses P = nRT / V with R = 0.0821 L·atm/(mol·K).
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> int:
    """Ideal-gas volume (L, rounded) from pressure (atm), moles and temperature (K).

    Uses V = nRT / P with R = 0.0821 L·atm/(mol·K).
    """
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> int:
    """Ideal-gas temperature (K, rounded) from pressure (atm), moles and volume (L).

    Uses T = PV / (nR) with R = 0.0821 L·atm/(mol·K).
    """
    return round(float((pressure * volume) / (0.0821 * moles)))
# Run this module's doctests when executed as a script (no-op on import).
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
# File names the tokenizer looks for inside a checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download URLs for the vocab / fast-tokenizer files of each public checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length (positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

# Default init kwargs (casing behaviour) per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCAmelCase__(PreTrainedTokenizerFast):
    r"""
    "Fast" DistilBERT tokenizer backed by HuggingFace's *tokenizers* library,
    based on WordPiece (behaviourally identical to BertTokenizerFast).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Build the fast tokenizer and sync the backend normalizer options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the requested lowercase / strip_accents / CJK handling.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Return `[CLS] A [SEP]` (single) or `[CLS] A [SEP] B [SEP]` (pair)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files into `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 712 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Pairwise cosine similarity between two batches of embeddings.

    Both inputs are L2-normalized along their last dimension, so the matrix
    product yields cosine similarities of shape (len(image_embeds), len(text_embeds)).
    Named `cosine_distance` because the class below calls it under that name.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class lowerCAmelCase__(PreTrainedModel):
    """
    CLIP-based safety checker: projects image embeddings and scores them against
    fixed "concept" embeddings loaded from the checkpoint, flagging images whose
    similarity exceeds the stored per-concept thresholds.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        # Concept/threshold tensors are populated from the checkpoint; they are
        # constants at inference time, hence requires_grad=False.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Score `clip_input` embeddings; return (images, has_nsfw_concepts)."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    # Any "special care" hit lowers the bar for all regular concepts.
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        """Fully vectorized (ONNX-exportable) variant of `forward`."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 202 | 0 |
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
# Extract a 6-layer student checkpoint from a full RoBERTa / GPT-2 teacher for
# transfer-learned distillation: copies embeddings, every other transformer
# block, and the LM head into a compressed state dict, then saves it.
# NOTE(review): the imported name `GPTaLMHeadModel` looks machine-mangled from
# `GPT2LMHeadModel` — confirm and fix at the import site.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    # Keep teacher layers 0,2,4,7,9,11 and renumber them 0..5 in the student.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")
    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
| 145 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCamelCase__(PipelineTool):
    """Visual-question-answering tool: answers an English question about an image."""

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The ViLT processor needs PIL/vision support at runtime.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Tokenize/featurize the (image, question) pair into model tensors."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model without gradients and return raw logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit to its answer label."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 344 | 0 |
import qiskit
def quantum_entanglement(qubits: int = 2):
    """Build a GHZ (fully entangled) state over `qubits` qubits and measure it.

    Applies H on qubit 0 then chains CNOTs, so every shot collapses all qubits
    to the same value. Returns the measurement counts from 1000 simulator shots
    (expected keys: all-zeros and all-ones bitstrings).
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"""Total count for various states are: {quantum_entanglement(3)}""")
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class lowercase(FeatureExtractionMixin):
    """
    General feature extractor for sequence inputs (e.g. audio): pads and/or
    truncates variable-length feature sequences so they can be batched into
    rectangular arrays/tensors.

    Args:
        feature_size: Dimension of each extracted feature vector.
        sampling_rate: Sampling rate (Hz) the inputs are expected in.
        padding_value: Fill value used for padded positions.
    """

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        # Popped from kwargs so the base-class __init__ does not see them twice.
        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        """Pad (and optionally truncate) a batch of processed features.

        Returns a `BatchFeature` whose sequences all share one length, converted
        to `return_tensors` (auto-detected from the inputs when None).
        """
        # If a list of dict-like examples was given, convert it to a dict of lists.
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # Downcast accidental float64 to the float32 models expect.
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad a single example (dict of arrays) up to `max_length` in place."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        """Truncate a single example down to `max_length` when `truncation` is set."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Resolve the user-supplied `padding` argument into a PaddingStrategy."""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
| 647 | 0 |
import math
def UpperCamelCase_ ( __a , __a ) -> float:
return math.pow(__A , 2 ) - a
def UpperCamelCase_ ( __a ) -> float:
return 2 * x
def UpperCamelCase_ ( __a ) -> float:
a__ : Tuple = 2.0
while start <= a:
a__ : List[Any] = math.pow(__A , 2 )
return start
def UpperCamelCase_ ( __a , __a = 9_999 , __a = 0.00000000000001 ) -> float:
if a < 0:
raise ValueError("math domain error" )
a__ : int = get_initial_point(__A )
for _ in range(__A ):
a__ : List[Any] = value
a__ : Dict = value - fx(__A , __A ) / fx_derivative(__A )
if abs(prev_value - value ) < tolerance:
return value
return value
# Run this module's doctests when executed as a script (no-op on import).
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 37 |
import torch
from torch import nn
class A_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A , _A , _A , _A , _A=1 , _A=False) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase : List[str] = n_token
_UpperCAmelCase : List[Any] = d_embed
_UpperCAmelCase : List[str] = d_proj
_UpperCAmelCase : str = cutoffs + [n_token]
_UpperCAmelCase : Union[str, Any] = [0] + self.cutoffs
_UpperCAmelCase : Optional[Any] = div_val
_UpperCAmelCase : Tuple = self.cutoffs[0]
_UpperCAmelCase : Union[str, Any] = len(self.cutoffs) - 1
_UpperCAmelCase : Tuple = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_UpperCAmelCase : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed))
_UpperCAmelCase : int = nn.Parameter(torch.zeros(self.n_clusters))
_UpperCAmelCase : Tuple = nn.ModuleList()
_UpperCAmelCase : Optional[int] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A)))
else:
self.out_projs.append(_A)
self.out_layers.append(nn.Linear(_A , _A))
else:
for i in range(len(self.cutoffs)):
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase : Any = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_A , _A)))
self.out_layers.append(nn.Linear(_A , r_idx - l_idx))
_UpperCAmelCase : List[str] = keep_order
def snake_case__ ( self , _A , _A , _A , _A) -> Optional[int]:
"""simple docstring"""
if proj is None:
_UpperCAmelCase : str = nn.functional.linear(_A , _A , bias=_A)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_UpperCAmelCase : Any = nn.functional.linear(_A , proj.t().contiguous())
_UpperCAmelCase : Optional[int] = nn.functional.linear(_A , _A , bias=_A)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def snake_case__ ( self , _A , _A=None , _A=False) -> List[Any]:
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
_UpperCAmelCase : List[Any] = hidden[..., :-1, :].contiguous()
_UpperCAmelCase : Tuple = labels[..., 1:].contiguous()
_UpperCAmelCase : Optional[int] = hidden.view(-1 , hidden.size(-1))
_UpperCAmelCase : List[Any] = labels.view(-1)
if hidden.size(0) != labels.size(0):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''')
else:
_UpperCAmelCase : Any = hidden.view(-1 , hidden.size(-1))
if self.n_clusters == 0:
_UpperCAmelCase : Dict = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
if labels is not None:
_UpperCAmelCase : List[Any] = labels != -100
_UpperCAmelCase : Optional[int] = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device)
_UpperCAmelCase : List[Any] = (
-nn.functional.log_softmax(_A , dim=-1)[mask].gather(1 , labels[mask].unsqueeze(1)).squeeze(1)
)
else:
_UpperCAmelCase : Any = nn.functional.log_softmax(_A , dim=-1)
else:
# construct weights and biases
_UpperCAmelCase , _UpperCAmelCase : Any = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
_UpperCAmelCase , _UpperCAmelCase : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_UpperCAmelCase : List[str] = self.out_layers[0].weight[l_idx:r_idx]
_UpperCAmelCase : List[str] = self.out_layers[0].bias[l_idx:r_idx]
else:
_UpperCAmelCase : Dict = self.out_layers[i].weight
_UpperCAmelCase : Dict = self.out_layers[i].bias
if i == 0:
_UpperCAmelCase : Union[str, Any] = torch.cat([weight_i, self.cluster_weight] , dim=0)
_UpperCAmelCase : Optional[Any] = torch.cat([bias_i, self.cluster_bias] , dim=0)
weights.append(_A)
biases.append(_A)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = weights[0], biases[0], self.out_projs[0]
_UpperCAmelCase : List[Any] = self._compute_logit(_A , _A , _A , _A)
_UpperCAmelCase : Dict = nn.functional.log_softmax(_A , dim=1)
if labels is None:
_UpperCAmelCase : Union[str, Any] = hidden.new_empty((head_logit.size(0), self.n_token))
else:
_UpperCAmelCase : Tuple = torch.zeros_like(_A , dtype=hidden.dtype , device=hidden.device)
_UpperCAmelCase : Optional[Any] = 0
_UpperCAmelCase : Any = [0] + self.cutoffs
for i in range(len(_A) - 1):
_UpperCAmelCase , _UpperCAmelCase : str = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_UpperCAmelCase : Optional[int] = (labels >= l_idx) & (labels < r_idx)
_UpperCAmelCase : Optional[Any] = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_UpperCAmelCase : int = labels.index_select(0 , _A) - l_idx
_UpperCAmelCase : Optional[int] = head_logprob.index_select(0 , _A)
_UpperCAmelCase : List[str] = hidden.index_select(0 , _A)
else:
_UpperCAmelCase : Optional[int] = hidden
if i == 0:
if labels is not None:
_UpperCAmelCase : Tuple = head_logprob_i.gather(1 , target_i[:, None]).squeeze(1)
else:
_UpperCAmelCase : int = head_logprob[:, : self.cutoffs[0]]
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = weights[i], biases[i], self.out_projs[i]
_UpperCAmelCase : Dict = self._compute_logit(_A , _A , _A , _A)
_UpperCAmelCase : List[Any] = nn.functional.log_softmax(_A , dim=1)
_UpperCAmelCase : Tuple = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_UpperCAmelCase : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None]).squeeze(1)
else:
_UpperCAmelCase : str = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_UpperCAmelCase : Tuple = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''') and self.keep_order) or keep_order:
out.index_copy_(0 , _A , -logprob_i)
else:
out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return out
    def snake_case__ ( self , _A) -> Dict:
        """Compute full-vocabulary log-probabilities for the hidden states ``_A``.

        With no clusters (``self.n_clusters == 0``) this is a plain
        log-softmax over a single output layer.  Otherwise it implements the
        adaptive-softmax scheme: a "head" softmax covers the most frequent
        tokens plus one pseudo-token per tail cluster, and each tail cluster's
        log-probs are offset by the head log-prob of its pseudo-token.

        NOTE(review): variable names in this block were mangled by an
        obfuscation pass (everything is ``_UpperCAmelCase``), so several reads
        (e.g. ``l_idx``/``r_idx``, ``hidden``, ``head_logprob``, ``out``)
        no longer match any visible assignment — reconcile against the
        upstream Transformer-XL ``ProjectedAdaptiveLogSoftmax.log_prob``
        before relying on this code.
        """
        if self.n_clusters == 0:
            # Single-softmax fast path: one projected logit, full log-softmax.
            _UpperCAmelCase : List[str] = self._compute_logit(_A , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0])
            return nn.functional.log_softmax(_A , dim=-1)
        else:
            # construct weights and biases for every cutoff segment; segment 0
            # additionally gets the learned cluster weight/bias appended so the
            # head softmax can score the tail-cluster pseudo-tokens.
            _UpperCAmelCase , _UpperCAmelCase : List[str] = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    # div_val == 1: all segments share out_layers[0], sliced per cutoff.
                    _UpperCAmelCase , _UpperCAmelCase : int = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    _UpperCAmelCase : Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
                    _UpperCAmelCase : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    # div_val != 1: each segment has its own (smaller) output layer.
                    _UpperCAmelCase : Any = self.out_layers[i].weight
                    _UpperCAmelCase : List[Any] = self.out_layers[i].bias
                if i == 0:
                    _UpperCAmelCase : Any = torch.cat([weight_i, self.cluster_weight] , dim=0)
                    _UpperCAmelCase : Tuple = torch.cat([bias_i, self.cluster_bias] , dim=0)
                weights.append(_A)
                biases.append(_A)
            # Head softmax over frequent tokens + cluster pseudo-tokens.
            _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = weights[0], biases[0], self.out_projs[0]
            _UpperCAmelCase : int = self._compute_logit(_A , _A , _A , _A)
            # Output buffer: (batch, n_token) full-vocabulary log-probs.
            _UpperCAmelCase : str = hidden.new_empty((head_logit.size(0), self.n_token))
            _UpperCAmelCase : List[str] = nn.functional.log_softmax(_A , dim=1)
            _UpperCAmelCase : List[str] = [0] + self.cutoffs
            for i in range(len(_A) - 1):
                _UpperCAmelCase , _UpperCAmelCase : Tuple = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    # Segment 0 log-probs come straight from the head softmax.
                    _UpperCAmelCase : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
                else:
                    # Tail segment i: its own softmax, shifted by the head
                    # log-prob of the corresponding cluster pseudo-token.
                    _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = weights[i], biases[i], self.out_projs[i]
                    _UpperCAmelCase : Dict = self._compute_logit(_A , _A , _A , _A)
                    _UpperCAmelCase : Optional[int] = nn.functional.log_softmax(_A , dim=1)
                    # NOTE(review): ``head_logprob[:, -i]`` indexes the cluster
                    # pseudo-token from the end of the head — confirm against upstream.
                    _UpperCAmelCase : Tuple = head_logprob[:, -i] + tail_logprob_i
                _UpperCAmelCase : str = logprob_i
            return out
| 485 | 0 |
"""simple docstring"""
from __future__ import annotations
import time
# Type alias for a path: a list of (y, x) coordinates.
# NOTE: the three module constants below were all assigned to the same name
# (`_A`), shadowing one another, while the classes below reference `Path`,
# `grid` and `delta` — restore the distinct names.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A search-tree node: position, goal position, and parent back-pointer.

    `pos` is stored as (y, x) so it can be compared against grid coordinates.
    """

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: "Node | None") -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Unidirectional BFS over the module-level `grid` using moves in `delta`."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        # Coordinates arrive as (y, x); Node takes (x, y, goal_x, goal_y, parent).
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> "Path | None":
        """Run BFS; return the path to the target, or [start] if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            for node in self.get_successors(current_node):
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: "Node") -> "list[Node]":
        """Return in-bounds, unblocked neighbour nodes of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:  # 1 marks an obstacle
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: "Node | None") -> "Path":
        """Walk parent pointers back to the root and return the (y, x) path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Two simultaneous BFS frontiers — one from start, one from goal."""

    def __init__(self, start, goal) -> None:
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> "Path | None":
        """Expand both frontiers in lockstep until they meet.

        NOTE(review): the loop condition uses `or`, but both queues are popped
        each iteration — an empty queue would raise IndexError; kept as-is to
        preserve the original behavior.
        """
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # Aim each frontier at the other frontier's current node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: "Node", bwd_node: "Node") -> "Path":
        """Join the forward path and the reversed backward path at the meeting node."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 507 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
@dataclass
class Tracker:
    """Record the leaf modules a model executes during one forward pass.

    A forward hook is registered on every submodule; modules with no
    submodules (plus Conv2d/BatchNorm2d) are appended to `traced` in
    execution order.
    """

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Trace only "operations": leaf modules, or conv/batchnorm explicitly.
        has_not_submodules = (
            len(list(m.modules())) == 1
            or isinstance(m, nn.Conv2d)
            or isinstance(m, nn.BatchNorm2d)
        )
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Detach all hooks so the model is left unmodified.
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copy weights from `src` to `dest` by matching traced operations 1:1.

    Both models are traced with `Tracker` on the same input; module types
    listed in `src_skip`/`dest_skip` are ignored before pairing.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}."""
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""")
def convert_weight_and_push(name: str, config: "ResNetConfig", save_directory: "Path", push_to_hub: bool = True):
    """Convert one timm ResNet checkpoint into the HF architecture.

    Loads the pretrained timm model `name`, transfers its weights into a
    fresh `ResNetForImageClassification(config)`, verifies the logits match,
    and optionally pushes model + image processor to the Hub.
    """
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    # Sanity check: the transferred model must reproduce the timm logits.
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"""resnet{'-'.join(name.split('resnet'))}"""
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="""Add model""",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="""Add image processor""",
            use_temp_dir=True,
        )
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: "Path", model_name: str = None, push_to_hub: bool = True):
    """Build ImageNet-labelled ResNet configs and convert one or all checkpoints.

    When `model_name` is None every supported resnet* variant is converted.
    Returns `(config, expected_shape)`.
    NOTE(review): on the single-model path `config` is never bound before the
    return — preserved from the original code; confirm intended behavior.
    """
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Partial config pre-filled with the ImageNet label mappings.
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        """resnet18""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="""basic"""),
        """resnet26""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="""bottleneck"""),
        """resnet34""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="""basic"""),
        """resnet50""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="""bottleneck"""),
        """resnet101""": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="""bottleneck"""),
        """resnet152""": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="""bottleneck"""),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
            """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 507 | 1 |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Read a pyspark `DataFrame` into a `datasets` Dataset via the Spark builder."""

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset: streaming view, or cached on-disk build."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a rebuild when the cached result must not be reused.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 178 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

# Map of reference checkpoints to their hosted config files.
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
    """allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
    """allenai/longformer-large-4096-finetuned-triviaqa""": (
        """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
    ),
    """allenai/longformer-base-4096-extra.pos.embd.only""": (
        """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
    ),
    """allenai/longformer-large-4096-extra.pos.embd.only""": (
        """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
    ),
}
class LongformerConfig(PretrainedConfig):
    """Configuration for a Longformer model (`model_type="longformer"`).

    Stores the transformer hyper-parameters plus the Longformer-specific
    sliding-window size (`attention_window`) and ONNX-export flag.
    """

    model_type = """longformer"""

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer.

    Adds `global_attention_mask` to the model inputs and raises the minimum
    opset so the sliding-window attention ops are exportable.
    """

    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        # Switch the model config onto its ONNX-exportable code path.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating ONNX vs. PyTorch outputs.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # Longformer export requires at least opset 14.
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        global_attention_mask = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        global_attention_mask[:, ::2] = 1
        inputs["global_attention_mask"] = global_attention_mask
        return inputs
| 178 | 1 |
'''simple docstring'''
from torch import nn
def UpperCAmelCase_ ( act_fn: str ) -> "nn.Module":
    """Return the torch.nn activation module named by ``act_fn``.

    Accepts "swish"/"silu" (both map to SiLU), "mish" and "gelu".

    Raises:
        ValueError: for any other name.
    """
    # Bug fix: the parameter was previously named differently from the
    # `act_fn` identifier the body reads, causing a NameError on every call.
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'Unsupported activation function: {act_fn}' )
| 709 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """Project CLIP image/text embeddings into UNet conditioning for unCLIP.

    Produces (a) extra cross-attention context tokens derived from the image
    embedding and (b) an additive time-embedding term.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim: int,
    ):
        super().__init__()

        # Learned embedding used in place of the image embedding for the
        # unconditional half of classifier-free guidance.
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        """Return `(text_encoder_hidden_states, additive_clip_time_embeddings)`."""
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 119 | 0 |
from __future__ import annotations
def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] ):
__snake_case : List[Any] = len(__UpperCAmelCase )
# We need to create solution object to save path.
__snake_case : Optional[int] = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
__snake_case : int = run_maze(__UpperCAmelCase , 0 , 0 , __UpperCAmelCase )
if solved:
print('\n'.join(str(__UpperCAmelCase ) for row in solutions ) )
else:
print('No solution exists!' )
return solved
def run_maze(maze: "list[list[int]]", i: int, j: int, solutions: "list[list[int]]") -> bool:
    """Depth-first backtracking step: try to extend the path through (i, j).

    Marks (i, j) in `solutions` while exploring and unmarks it on backtrack.
    Returns True iff the bottom-right corner is reachable from (i, j).
    """
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0  # backtrack
            return False
    return False
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 576 | from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    """Configuration for a RetriBERT retrieval model (`model_type="retribert"`).

    Standard BERT-style hyper-parameters plus the retrieval-specific
    `share_encoders` and `projection_dim` options.
    """

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether the query and document towers share one encoder.
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 576 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a : Tuple = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """CLIP-style image processor.

    Pipeline (each step optional): convert to RGB -> resize shortest edge ->
    center crop -> rescale -> normalize with the OpenAI CLIP mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Apply the full preprocessing pipeline; returns a `BatchFeature`."""
        # Per-call overrides fall back to the processor's configured defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 31 |
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` from UK coins (Project Euler 31).

    Classic coin-change dynamic programming: `number_of_ways[i]` accumulates,
    coin by coin, the number of combinations summing to `i`.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
| 31 | 1 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
a = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
a = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> 
predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
a = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Compute per-class area statistics for a single prediction.

    Renamed from the collapsed `__magic_name__`: the call site in
    `total_intersect_and_union` already invokes `intersect_and_union`.

    Args:
        pred_label: predicted segmentation map (array-like, (H, W)).
        label: ground-truth segmentation map (array-like, (H, W)).
        num_labels: number of classes.
        ignore_index: ground-truth value excluded from all statistics.
        label_map: optional {old_id: new_id} remapping applied to `label`.
        reduce_labels: if True, shift labels down by one and map background
            (0) to 255, as done for ADE20k-style datasets.

    Returns:
        Tuple of four (num_labels,) int arrays:
        (area_intersect, area_union, area_pred_label, area_label).
    """
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        # background (0) becomes 255, every other class id is shifted down by 1
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    # drop every pixel whose ground truth equals ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = label[mask]
    # pixels where prediction and ground truth agree
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate `intersect_and_union` statistics over a whole dataset.

    Renamed from the collapsed `__magic_name__`: `mean_iou` already calls
    `total_intersect_and_union`.

    Args:
        results: iterable of predicted segmentation maps.
        gt_seg_maps: iterable of ground-truth maps, parallel to `results`.
        num_labels / ignore_index / label_map / reduce_labels: see
            `intersect_and_union`.

    Returns:
        Four float64 arrays of shape (num_labels,): total intersect, union,
        predicted-label and ground-truth-label areas.
    """
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU / accuracy metrics for semantic segmentation.

    Renamed from the collapsed `__magic_name__`: the Metric's `_compute`
    already calls `mean_iou` with these keyword argument names.

    Returns a dict with keys: mean_iou, mean_accuracy, overall_accuracy,
    per_category_iou, per_category_accuracy. If `nan_to_num` is given, NaNs
    in every metric are replaced by that value.
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __a(datasets.Metric):
    """`datasets.Metric` wrapper around `mean_iou`.

    The two hook methods were both named `UpperCAmelCase__` (the second
    shadowed the first); they are restored to the `_info` / `_compute` names
    the `datasets.Metric` framework dispatches to.
    """

    def _info(self):
        # Metric metadata: inputs are nested sequences of uint16 pixel labels
        # (1st Seq - height dim, 2nd - width dim).
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        # Thin delegation to the functional implementation above.
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 109 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __lowercase ( __snake_case ):
    # Speech feature extractor: turns raw waveforms into padded `input_values`
    # (optionally zero-mean/unit-variance normalized) and target waveforms into
    # log-mel spectrogram `input_values` for a speech model's decoder.
    #
    # NOTE(review): every parameter of __init__ and of the methods below is
    # literally named `snake_case`. Duplicate parameter names are a SyntaxError
    # in Python, so this class cannot execute as written — an automated rename
    # appears to have collapsed the original distinct parameter names. The
    # positional order of the attribute assignments in each body still records
    # the intended parameters; restore them before use.
    _A = ["input_values", "attention_mask"]
    # Intended parameters (by position, inferred from the assignments below):
    # feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=False,
    # num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window",
    # frame_signal_scale=1.0, fmin=80, fmax=7600, mel_floor=1e-10,
    # reduction_factor=2, return_attention_mask=True — TODO confirm.
    def __init__(self : int , snake_case : int = 1 , snake_case : int = 1_6000 , snake_case : float = 0.0 , snake_case : bool = False , snake_case : int = 80 , snake_case : int = 16 , snake_case : int = 64 , snake_case : str = "hann_window" , snake_case : float = 1.0 , snake_case : float = 80 , snake_case : float = 7600 , snake_case : float = 1e-10 , snake_case : int = 2 , snake_case : bool = True , **snake_case : Any , ) -> str:
        super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
        _lowercase : int = do_normalize
        _lowercase : Any = return_attention_mask
        _lowercase : str = num_mel_bins
        _lowercase : str = hop_length
        _lowercase : List[Any] = win_length
        _lowercase : Tuple = win_function
        _lowercase : Any = frame_signal_scale
        _lowercase : Any = fmin
        _lowercase : Any = fmax
        _lowercase : List[str] = mel_floor
        _lowercase : str = reduction_factor
        # Derived STFT geometry: window/hop sizes in samples, FFT length, and
        # the number of frequency bins of a one-sided spectrum.
        _lowercase : str = win_length * sampling_rate // 1000
        _lowercase : Optional[int] = hop_length * sampling_rate // 1000
        _lowercase : Tuple = optimal_fft_length(self.sample_size )
        _lowercase : int = (self.n_fft // 2) + 1
        # Precomputed analysis window and mel filter bank (slaney norm/scale).
        _lowercase : Tuple = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
        _lowercase : Optional[int] = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
    # Per-utterance zero-mean/unit-variance normalization; when an attention
    # mask is given, statistics are computed over the unpadded length only and
    # padded positions are overwritten with `padding_value`.
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def _a(snake_case : List[np.ndarray] , snake_case : List[np.ndarray] , snake_case : float = 0.0 ) -> List[np.ndarray]:
        if attention_mask is not None:
            _lowercase : List[str] = np.array(snake_case , np.intaa )
            _lowercase : Dict = []
            for vector, length in zip(snake_case , attention_mask.sum(-1 ) ):
                # 1e-7 guards against division by zero for silent segments
                _lowercase : Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
                if length < normed_slice.shape[0]:
                    _lowercase : List[str] = padding_value
                normed_input_values.append(snake_case )
        else:
            _lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
        return normed_input_values
    # Compute a (frames, num_mel_bins) log10-mel spectrogram of one waveform.
    def _a(self : Optional[Any] , snake_case : np.ndarray , ) -> np.ndarray:
        _lowercase : Tuple = spectrogram(
            snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
        return log_mel_spec.T
    # Entry point: featurize `audio` (inputs) and/or `audio_target` (labels);
    # when both are given, the target features/mask are merged into the input
    # batch as `labels` / `decoder_attention_mask`.
    def __call__(self : Tuple , snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , snake_case : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , snake_case : Union[bool, str, PaddingStrategy] = False , snake_case : Optional[int] = None , snake_case : bool = False , snake_case : Optional[int] = None , snake_case : Optional[bool] = None , snake_case : Optional[Union[str, TensorType]] = None , snake_case : Optional[int] = None , **snake_case : Union[str, Any] , ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values." )
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if audio is not None:
            _lowercase : Union[str, Any] = self._process_audio(
                snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , )
        else:
            _lowercase : str = None
        if audio_target is not None:
            _lowercase : Union[str, Any] = self._process_audio(
                snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , )
            if inputs is None:
                return inputs_target
            else:
                _lowercase : int = inputs_target["input_values"]
                _lowercase : Optional[int] = inputs_target.get("attention_mask" )
                if decoder_attention_mask is not None:
                    _lowercase : List[str] = decoder_attention_mask
        return inputs
    # Shared featurization path for inputs and targets: batch the raw speech,
    # extract mel features when `is_target`, pad, and optionally normalize.
    def _a(self : Optional[Any] , snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , snake_case : bool = False , snake_case : Union[bool, str, PaddingStrategy] = False , snake_case : Optional[int] = None , snake_case : bool = False , snake_case : Optional[int] = None , snake_case : Optional[bool] = None , snake_case : Optional[Union[str, TensorType]] = None , **snake_case : int , ) -> BatchFeature:
        _lowercase : int = isinstance(snake_case , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        _lowercase : List[str] = is_batched_numpy or (
            isinstance(snake_case , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            _lowercase : List[Any] = [np.asarray(snake_case , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(snake_case , np.ndarray ):
            _lowercase : Optional[Any] = np.asarray(snake_case , dtype=np.floataa )
        elif isinstance(snake_case , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            _lowercase : Optional[int] = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            _lowercase : int = [speech]
        # needed to make pad() work on spectrogram inputs
        _lowercase : Tuple = self.feature_size
        # convert into correct format for padding
        if is_target:
            _lowercase : List[str] = [self._extract_mel_features(snake_case ) for waveform in speech]
            _lowercase : int = BatchFeature({"input_values": features} )
            _lowercase : str = self.num_mel_bins
        else:
            _lowercase : Optional[int] = BatchFeature({"input_values": speech} )
        _lowercase : List[Any] = self.pad(
            snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
        _lowercase : Optional[Any] = feature_size_hack
        # convert input values to correct format
        _lowercase : int = padded_inputs["input_values"]
        if not isinstance(input_values[0] , np.ndarray ):
            _lowercase : Any = [np.asarray(snake_case , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(snake_case , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            _lowercase : Union[str, Any] = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(snake_case , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            _lowercase : Tuple = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        _lowercase : Union[str, Any] = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            _lowercase : str = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            _lowercase : Dict = (
                attention_mask
                if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            _lowercase : List[Any] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"] , attention_mask=snake_case , padding_value=self.padding_value )
        if return_tensors is not None:
            _lowercase : Any = padded_inputs.convert_to_tensors(snake_case )
        return padded_inputs
    # Serialization helper: drop attributes that are derived in __init__ so
    # they are recomputed on load instead of being persisted.
    def _a(self : Dict ) -> Dict[str, Any]:
        _lowercase : str = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        _lowercase : Optional[Any] = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 461 | 0 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# All five results below were collapsed to the single name `UpperCAmelCase`,
# while the f-string / regex / comprehension read the real names — restored.
# SHA of the commit where the current branch forked from main.
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
# Files modified since the fork point (files not under git are not listed).
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode('utf-8').split()
# Keep only .py files under the top-level dirs given on the command line.
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is fed straight into Makefile commands.
print(' '.join(relevant_modified_files), end='')
| 299 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# All three constants were collapsed to `UpperCAmelCase`; the functions below
# read `LETTERS` and `ETAOIN`, so those names are restored. The frequency
# table is named `english_letter_freq` following the upstream module —
# NOTE(review): no code in this chunk reads it; confirm the name elsewhere.
english_letter_freq = {
    'E': 12.70,
    'T': 9.06,
    'A': 8.17,
    'O': 7.51,
    'I': 6.97,
    'N': 6.75,
    'S': 6.33,
    'H': 6.09,
    'R': 5.99,
    'D': 4.25,
    'L': 4.03,
    'C': 2.78,
    'U': 2.76,
    'M': 2.41,
    'W': 2.36,
    'F': 2.23,
    'G': 2.02,
    'Y': 1.97,
    'P': 1.93,
    'B': 1.29,
    'V': 0.98,
    'K': 0.77,
    'J': 0.15,
    'X': 0.15,
    'Q': 0.10,
    'Z': 0.07,
}
# English letters ordered from most to least frequent.
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    """Return a mapping from each uppercase letter to its count in `message`.

    Renamed from the collapsed `lowerCamelCase`: `get_frequency_order` below
    already calls `get_letter_count`. Non-letter characters are ignored.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        # membership in the dict is equivalent to membership in A-Z
        if letter in letter_count:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero(x: tuple) -> str:
    """Sort-key helper: return the first element of `x`.

    The collapsed original read an undefined name `x`; the parameter is
    restored. Used by `get_frequency_order` to sort (frequency, letters)
    pairs by frequency.
    """
    return x[0]
def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in `message`.

    Renamed from the collapsed `lowerCamelCase`: `english_freq_match_score`
    below already calls `get_frequency_order`. Letters with the same count
    are ordered by reverse ETAOIN position so the result is deterministic.
    """
    letter_to_freq = get_letter_count(message)
    # invert the mapping: frequency -> list of letters with that frequency
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        # tie-break within a frequency bucket by reverse ETAOIN order
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    """Score (0-12) how closely `message`'s letter frequencies match English.

    Renamed per the upstream frequency-analysis module — NOTE(review): no
    call site is visible in this chunk; confirm the name elsewhere. One point
    for each of English's six most common letters found among the message's
    six most common, plus one for each of the six least common likewise.
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run this module's doctests when the file is executed directly.
    import doctest
    doctest.testmod()
| 299 | 1 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# All five results below were collapsed to the single name `UpperCAmelCase`,
# while the f-string / regex / comprehension read the real names — restored.
# SHA of the commit where the current branch forked from main.
fork_point_sha = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
# Files modified since the fork point; --diff-filter=d excludes deleted files.
modified_files = (
    subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split()
)
# Keep only .py files under the top-level dirs given on the command line.
joined_dirs = """|""".join(sys.argv[1:])
regex = re.compile(rf'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
# No trailing newline: the output is fed straight into Makefile commands.
print(""" """.join(relevant_modified_files), end="""""")
| 88 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """Configuration for data2vec-vision models.

    The original __init__ declared every parameter as `_snake_case`
    (duplicate names — a SyntaxError) and assigned to locals instead of
    `self`; the right-hand sides preserved the intended parameter names,
    which are restored here in the same positional order with the same
    defaults.
    """

    # PretrainedConfig subclasses identify themselves through `model_type`
    # (restored from the collapsed `A__` attribute name).
    model_type = '''data2vec-vision'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-1_2,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        # NOTE: list defaults kept to match the original interface; they are
        # only read, never mutated.
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase ( lowerCAmelCase_ ):
    """ONNX export configuration for data2vec-vision.

    Both properties were named `snake_case_` (the second shadowed the
    first); they are restored to the `inputs` / `atol_for_validation`
    names the `OnnxConfig` API dispatches to. NOTE(review): this class
    shares its name with the config class above and shadows it — the two
    class names likely need restoring as well.
    """

    # Minimum torch version required to export this architecture
    # (restored from the collapsed `A__` attribute name per OnnxConfig API).
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        # Single pixel_values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating the exported model.
        return 1E-4
| 509 | 0 |
from manim import *
class _A ( __UpperCAmelCase ):
    # Manim scene (Accelerate docs animation): visualizes weights being stored
    # to np.memmaps/devices based on a configuration, then the loaded
    # checkpoint being removed from memory by garbage collection.
    #
    # NOTE(review): an automated rename collapsed every local to `__a`; the
    # names read below (cpu_left_col_base, model_cpu_arr, ckpt_arr,
    # cpu_right_col_base, disk_left_col_base, key_text, ...) are never bound
    # under those names, so this method cannot run as written.
    def _lowerCamelCase ( self : List[Any]):
        """Build and play the 'weights to memmaps, checkpoint GC'd' animation."""
        # Basic building blocks: memory cells, meta-memory cells, fill square.
        __a = Rectangle(height=0.5 , width=0.5)
        __a = Rectangle(height=0.25 , width=0.25)
        __a = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
        # CPU: two columns of 6 cells with a label.
        __a = [mem.copy() for i in range(6)]
        __a = [mem.copy() for i in range(6)]
        __a = VGroup(*__SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0)
        __a = VGroup(*__SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0)
        __a = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0)
        __a = Text('''CPU''' , font_size=24)
        __a = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(__SCREAMING_SNAKE_CASE)
        # GPU: one row of 4 cells with a label.
        __a = [mem.copy() for i in range(4)]
        __a = VGroup(*__SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0)
        __a = Text('''GPU''' , font_size=24)
        __a = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE)
        gpu.move_to([-1, -1, 0])
        self.add(__SCREAMING_SNAKE_CASE)
        # Model: one row of 6 cells with a label.
        __a = [mem.copy() for i in range(6)]
        __a = VGroup(*__SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0)
        __a = Text('''Model''' , font_size=24)
        __a = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE)
        model.move_to([3, -1.0, 0])
        self.add(__SCREAMING_SNAKE_CASE)
        # Fill targets mapping each model cell onto a CPU cell.
        __a = []
        __a = []
        __a = []
        for i, rect in enumerate(__SCREAMING_SNAKE_CASE):
            rect.set_stroke(__SCREAMING_SNAKE_CASE)
            __a = Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=__SCREAMING_SNAKE_CASE)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__SCREAMING_SNAKE_CASE , buff=0.0)
            self.add(__SCREAMING_SNAKE_CASE)
            model_cpu_arr.append(__SCREAMING_SNAKE_CASE)
        self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE)
        # Loaded checkpoint: one row of 6 cells with a label.
        __a = [mem.copy() for i in range(6)]
        __a = VGroup(*__SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0)
        __a = Text('''Loaded Checkpoint''' , font_size=24)
        __a = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE)
        checkpoint.move_to([3, 0.5, 0])
        self.add(__SCREAMING_SNAKE_CASE)
        # Checkpoint fills mapped onto the CPU columns.
        __a = []
        __a = []
        for i, rect in enumerate(__SCREAMING_SNAKE_CASE):
            __a = fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7)
            target.move_to(__SCREAMING_SNAKE_CASE)
            ckpt_arr.append(__SCREAMING_SNAKE_CASE)
            __a = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(__SCREAMING_SNAKE_CASE)
        self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE)
        # Color-coded legend in the top-left corner.
        __a = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        __a = MarkupText(
            F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0])
        self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        __a = MarkupText(
            F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
        blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left())
        self.add(__SCREAMING_SNAKE_CASE)
        # Step 1: narrate the weights being stored to memmaps/devices, draw
        # the Disk group, and animate the checkpoint cells shrinking onto it.
        __a = MarkupText(
            F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
        step_a.move_to([2, 2, 0])
        __a = [meta_mem.copy() for i in range(6)]
        __a = [meta_mem.copy() for i in range(6)]
        __a = VGroup(*__SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0)
        __a = VGroup(*__SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0)
        __a = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0)
        __a = Text('''Disk''' , font_size=24)
        __a = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3) , Write(__SCREAMING_SNAKE_CASE , run_time=1) , Create(__SCREAMING_SNAKE_CASE , run_time=1))
        __a = []
        for i, rect in enumerate(__SCREAMING_SNAKE_CASE):
            __a = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=1.5))
        self.play(*__SCREAMING_SNAKE_CASE)
        self.play(FadeOut(__SCREAMING_SNAKE_CASE))
        # Step 2: narrate and animate the checkpoint being garbage-collected.
        __a = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24)
        step_a.move_to([2, 2, 0])
        self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3))
        self.play(
            FadeOut(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE) , )
        self.wait()
| 60 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( _UpperCAmelCase ):
__a = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __snake_case ( _UpperCAmelCase ):
__a = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def __snake_case ( ):
__a = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __snake_case ( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    """Convert an original CvT checkpoint into the HuggingFace Transformers format.

    Fixes the mangled original: the signature declared the same parameter name
    four times (a SyntaxError), locals were assigned to ``__a`` but read under
    their real names, and ``CvtConfig`` received non-existent keyword names.

    Args:
        cvt_model: name of the CvT variant (e.g. ``cvt-13``, ``cvt-21``, ``cvt-w24``).
        image_size: input image size recorded on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model and processor.
    """
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='dataset' ) ) , 'r' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels , id2label=idalabel , label2id=labelaid )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    # presumably the processor's resize edge should track the requested size — TODO confirm
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('cpu' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            # NOTE(review): `cls_token`/`embeddings`/`attention`/`final` are the
            # intended helper names; in this mangled file they are all defined
            # as `__snake_case` — confirm the bindings.
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # Fixes the mangled original: the parser was assigned to `__snake_case`
    # while the body called `parser.add_argument` / read `args` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--cvt_model',
        default='cvt-w24',
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        '--image_size',
        default=384,
        type=int,
        help='Input Image Size',
    )
    parser.add_argument(
        '--cvt_file_name',
        default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Input Image Size',  # NOTE(review): help text duplicated from --image_size — confirm.
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    # NOTE(review): `convert_cvt_checkpoint` is the intended converter name; in
    # this mangled file it is defined as `__snake_case` — confirm the binding.
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowerCAmelCase_ = logging.get_logger(__name__)  # module-level logger from the package's logging utilities
class __lowerCAmelCase ( __A ):
    """Feature extractor that turns raw HTML strings into their text nodes and
    absolute XPath expressions (MarkupLM-style preprocessing).

    Fixes the mangled original: three methods were all named ``lowerCamelCase``
    (each shadowing the previous) while ``__call__`` invoked
    ``self.xpath_soup`` / ``self.get_three_from_single`` /
    ``self.construct_xpath``, and method locals were assigned to ``snake_case_``
    but read under their real names.
    """

    def __init__(self , **__magic_name__ ) -> Union[str, Any]:
        # BeautifulSoup is required for HTML parsing.
        requires_backends(self , ['''bs4'''] )
        super().__init__(**__magic_name__ )

    def xpath_soup(self , element ) -> List[Any]:
        """Return (tag names, sibling subscripts) along the path from the root to `element`."""
        xpath_tags = []
        xpath_subscripts = []
        # Text nodes have no name; start from their parent tag.
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name , recursive=False )
            xpath_tags.append(child.name )
            # Subscript 0 means "only sibling of this tag"; otherwise the
            # 1-based position among same-named siblings.
            xpath_subscripts.append(
                0 if 1 == len(siblings ) else next(i for i, s in enumerate(siblings , 1 ) if s is child ) )
            child = parent
        # The walk went leaf->root; reverse to get root->leaf order.
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self , html_string ) -> Any:
        """Return (text nodes, tag paths, subscript paths) for one HTML string."""
        html_code = BeautifulSoup(html_string , '''html.parser''' )
        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []
        for element in html_code.descendants:
            if type(element ) == bsa.element.NavigableString:
                # Skip text that is not attached to a real tag.
                if type(element.parent ) != bsa.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element ).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag )
                xpath_tags, xpath_subs = self.xpath_soup(element )
                stringaxtag_seq.append(xpath_tags )
                stringaxsubs_seq.append(xpath_subs )
        if len(all_doc_strings ) != len(stringaxtag_seq ):
            raise ValueError('''Number of doc strings and xtags does not correspond''' )
        if len(all_doc_strings ) != len(stringaxsubs_seq ):
            raise ValueError('''Number of doc strings and xsubs does not correspond''' )
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self , xpath_tags , xpath_subscripts ) -> Dict:
        """Join tag names and subscripts into an XPath like ``/html/body/div[2]``."""
        xpath = ''
        for tagname, subs in zip(xpath_tags , xpath_subscripts ):
            xpath += F'''/{tagname}'''
            if subs != 0:
                xpath += F'''[{subs}]'''
        return xpath

    def __call__(self , html_strings ) -> Union[str, Any]:
        """Extract nodes and xpaths for one HTML string or a batch of them."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings , str ):
            valid_strings = True
        elif isinstance(html_strings , (list, tuple) ):
            if len(html_strings ) == 0 or isinstance(html_strings[0] , str ):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                F'''but is of type {type(html_strings )}.''' )
        is_batched = bool(isinstance(html_strings , (list, tuple) ) and (isinstance(html_strings[0] , str )) )
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string )
            nodes.append(all_doc_strings )
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings , stringaxtag_seq , stringaxsubs_seq ):
                xpath_string = self.construct_xpath(tag_list , sub_list )
                xpath_strings.append(xpath_string )
            xpaths.append(xpath_strings )
        # return as Dict
        data = {'nodes': nodes, 'xpaths': xpaths}
        encoded_inputs = BatchFeature(data=data , tensor_type=None )
        return encoded_inputs
from __future__ import annotations
def UpperCamelCase ( lst: list[int] ):
    """Return a peak element of `lst` (an element not smaller than both neighbours).

    Divide-and-conquer on the middle three elements, O(log n).  Fixes the
    mangled original: locals were assigned to ``A_`` but read as ``m``/``three``,
    the parameter was read as ``lst``, and the recursive calls targeted a
    non-existent ``peak`` instead of this function itself.
    """
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return UpperCamelCase(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return UpperCamelCase(lst[:m] )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
# Generic element type for the LRU cache below; the class body refers to `T`,
# so the TypeVar must be bound under that name (the mangled original bound it
# to `_UpperCAmelCase`, leaving `T` undefined).
T = TypeVar("T")
class __UpperCAmelCase ( Generic[T] ):
__SCREAMING_SNAKE_CASE : deque[T] # Cache store of keys
__SCREAMING_SNAKE_CASE : set[T] # References of the keys in cache
__SCREAMING_SNAKE_CASE : int = 10 # Maximum capacity of cache
def __init__( self , snake_case ):
snake_case_ = deque()
snake_case_ = set()
if not n:
snake_case_ = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
snake_case_ = n
def a ( self , snake_case ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
snake_case_ = self.dq_store.pop()
self.key_reference.remove(snake_case )
else:
self.dq_store.remove(snake_case )
self.dq_store.appendleft(snake_case )
self.key_reference.add(snake_case )
def a ( self ):
for k in self.dq_store:
print(snake_case )
def __repr__( self ):
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): the demo below refers to `LRUCache`, `lru_cache`, `.refer`
    # and `.display`, but the class above is mangled to `__UpperCAmelCase` with
    # both methods named `a`, so this driver raises NameError as written —
    # confirm the intended names.
    _UpperCAmelCase : LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("""A""")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("""A""")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    # Expected state for capacity 4 after the refers above (LRU key 2 evicted).
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=True ):
'''simple docstring'''
model.train()
snake_case_ = model(UpperCamelCase__ )
snake_case_ = F.mse_loss(UpperCamelCase__ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(UpperCamelCase__ )
def __lowerCamelCase ( accelerator , sched=False ):
    """Build the regression training setup used by the sync tests.

    Returns ``(model, ddp_model, dataloader)``; with `sched` truthy, returns
    ``(model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)``.
    Fixes the mangled original: duplicate parameter names (SyntaxError) and
    locals assigned to ``snake_case_`` but read under their real names.
    """
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        # Rebinding `sched` to the scheduler keeps the later truthiness checks valid.
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def __lowerCamelCase ( accelerator ):
    """Test that on a single device the `no_sync` context manager is a no-op.

    Fixes the mangled original, whose locals were assigned to ``snake_case_``
    but read under their real names (``model``/``ddp_model``/``dataloader``/...).
    """
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def __lowerCamelCase ( accelerator ):
    """Test that in a distributed setting `no_sync` really skips gradient sync.

    Gradients must be out of sync on `no_sync` iterations and in sync
    otherwise.  Fixes the mangled original's undefined locals.
    """
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def __lowerCamelCase ( split_batches=False , dispatch_batches=False ):
    """Test `accelerator.accumulate` with gradient_accumulation_steps=2.

    Gradients must be in sync only on sync iterations (every 2nd step or the
    final batch).  Fixes the mangled original, whose signature declared the
    same parameter name twice (a SyntaxError) and whose locals were undefined.
    """
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def __lowerCamelCase ( split_batches=False , dispatch_batches=False ):
    """Test `accelerator.accumulate` together with an optimizer and LR scheduler.

    Learning rates of the plain and prepared optimizers must stay aligned
    across accumulation boundaries.  Fixes the mangled original: duplicate
    parameter names (SyntaxError) and undefined locals.
    """
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                # One scheduler step per process to mirror the prepared scheduler.
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def __lowerCamelCase ( ):
    """Test that `GradientState` tracks the active dataloader, including when a
    second dataloader is iterated inside the first loop (a "break" pattern).

    Fixes the mangled original's undefined locals.
    """
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                # Iterate a nested dataloader: it must become the active one,
                # and control must hand back to the outer one afterwards.
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def __lowerCamelCase ( ):
    """Entry point: dispatch the sync/accumulation tests per distributed setup.

    Fixes the mangled original, whose `accelerator`/`state` locals were
    assigned to ``snake_case_`` but read under their real names.
    NOTE(review): the helper names called below (`test_dataloader_break`,
    `test_noop_sync`, ...) are all mangled to `__lowerCamelCase` in this file —
    confirm the bindings.
    """
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**' )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**' )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**' )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def __lowerCamelCase ( UpperCamelCase__ ):
    """Multi-process entry point (e.g. for spawned workers); the index argument is unused."""
    # NOTE(review): `main` is not bound under that name in this file — the
    # entry function above is also mangled to `__lowerCamelCase`. Confirm the
    # intended target before relying on this.
    main()
if __name__ == "__main__":
    main()
"""simple docstring"""
import os
import pytest
from attr import dataclass
SCREAMING_SNAKE_CASE_ = """us-east-1"""  # default AWS region for the SageMaker test environment
@dataclass
class snake_case_ :
    """Per-framework configuration for the SageMaker integration tests.

    Fixes the mangled original: the class attributes and all four properties
    were collapsed onto identical names (each shadowing the previous), and
    ``hyperparameters`` was referenced while unbound.  Attribute names are
    restored from their in-class uses (``self.framework``,
    ``{**hyperparameters, ...}``) — confirm against the upstream conftest.
    """

    framework: str  # "pytorch" or "tensorflow"; set by the pytest fixture
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    # Same settings but with more steps for multi-node/multi-GPU runs.
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self ):
        """Regexes SageMaker uses to scrape metrics from the training logs."""
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self ):
        return F"""{self.framework}-transfromers-test"""

    @property
    def test_path(self ):
        return F"""./tests/sagemaker/scripts/{self.framework}"""

    @property
    def image_uri(self ):
        """Deep-learning-container image matching the framework."""
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def lowerCAmelCase_ ( request ):
    """Attach a SageMakerTestEnvironment for the class's framework to the test class.

    Fixes the mangled original: the parameter was renamed away from `request`
    (which the body reads), the result was bound to an unused local with an
    unimported `Any` annotation, and stray corpus junk (`| 237 |`) trailed the
    line.  NOTE(review): `SageMakerTestEnvironment` is mangled to `snake_case_`
    above — confirm the binding.
    """
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __A(lowerCAmelCase , lowerCAmelCase ) -> np.array:
"""simple docstring"""
_UpperCamelCase = F'{sampling_rate}'
_UpperCamelCase = """1"""
_UpperCamelCase = """f32le"""
_UpperCamelCase = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(lowerCAmelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_UpperCamelCase = ffmpeg_process.communicate(lowerCAmelCase )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
_UpperCamelCase = output_stream[0]
_UpperCamelCase = np.frombuffer(lowerCAmelCase , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = "f32le" , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = F'{sampling_rate}'
_UpperCamelCase = """1"""
if format_for_conversion == "s16le":
_UpperCamelCase = 2
elif format_for_conversion == "f32le":
_UpperCamelCase = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
_UpperCamelCase = platform.system()
if system == "Linux":
_UpperCamelCase = """alsa"""
_UpperCamelCase = """default"""
elif system == "Darwin":
_UpperCamelCase = """avfoundation"""
_UpperCamelCase = """:0"""
elif system == "Windows":
_UpperCamelCase = """dshow"""
_UpperCamelCase = """default"""
_UpperCamelCase = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
_UpperCamelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_UpperCamelCase = _ffmpeg_stream(lowerCAmelCase , lowerCAmelCase )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[float] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
) -> Any:
    """
    Stream microphone audio as overlapping numpy chunks suitable for streaming ASR.

    Args:
        sampling_rate: Sampling rate (Hz) requested from ffmpeg.
        chunk_length_s: Length in seconds of a full (non-partial) chunk.
        stream_chunk_s: If set, size in seconds of the partial chunks emitted while a
            full chunk accumulates; defaults to ``chunk_length_s``.
        stride_length_s: Left/right overlap in seconds (scalar applies to both sides);
            defaults to ``chunk_length_s / 6``.
        format_for_conversion: ``"s16le"`` or ``"f32le"`` raw sample format.

    Yields:
        Dicts with ``raw`` (np.ndarray), ``stride`` (samples), ``sampling_rate`` and,
        for partial chunks, ``partial``.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample

    # Track wall-clock time so we can drop chunks when the consumer falls behind.
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Re-chunk a byte iterator into overlapping chunks of ``chunk_len`` bytes.

    Args:
        iterator: Iterable of ``bytes`` of arbitrary sizes.
        chunk_len: Size of each full chunk, in bytes.
        stride: ``(stride_left, stride_right)`` overlap in bytes between chunks.
        stream: When True, also yield partial chunks (flagged ``"partial": True``)
            while a full chunk accumulates.

    Yields:
        Dicts ``{"raw": bytes, "stride": (left, right)}`` (plus ``"partial"`` when
        ``stream`` is True).

    Raises:
        ValueError: If the total stride is not strictly smaller than ``chunk_len``.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # the very first chunk has no left overlap
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            # Not enough data for a full chunk yet: emit what we have as partial.
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the right-stride bytes (plus left overlap) for the next chunk.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def __A(lowerCAmelCase , lowerCAmelCase ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = 2**2_4 # 16Mo
try:
with subprocess.Popen(lowerCAmelCase , stdout=subprocess.PIPE , bufsize=lowerCAmelCase ) as ffmpeg_process:
while True:
_UpperCamelCase = ffmpeg_process.stdout.read(lowerCAmelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
| 612 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    """Builds a tiny ViT-hybrid config and random inputs, and runs shape checks used by the test class below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],  # read-only default, never mutated
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values, labels) triple of random test inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a tiny ViTHybridConfig with an equally tiny BiT backbone config."""
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward the base model and check the last hidden state shape."""
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward the classification head and check the logits shape."""
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by ModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViT hybrid (mixin bases were lost in this copy; restored from the top-of-file imports)."""

    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone: its weights are not zero-initialized.
            backbone_params = []  # ensure bound even if no patch-embedding module is found
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    """Load the standard COCO cats fixture used by the slow integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end checks against the released google/vit-hybrid checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        # Bug fix: `assertTrue(x, msg)` treated the expected label as a message and
        # passed for any truthy prediction; use assertEqual to actually compare.
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 715 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_snake_case : Dict = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    """Tests for the `text-classification` pipeline (tiny models, both PT and TF)."""

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # Filter out model types whose inputs differ from the usual text models.
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior: `return_all_scores=False` yields a single dict per input...
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # ...while `return_all_scores=True` yields one list of dicts per input.
        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline instance + sample inputs for the shared pipeline test harness."""
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 203 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Whether we are running inside a Google Colab notebook; the menu class below
# reads this flag (`in_colab`) to decide between raw keypresses and stdin input.
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class __lowercase:
    """
    Interactive bullet-point selection menu driven by arrow/number keys
    (or plain stdin input when running inside Colab).
    """

    def __init__(self, prompt: str = None, choices: list = None):
        # Avoid the mutable-default-argument pitfall: a fresh list per instance.
        self.position = 0
        self.choices = [] if choices is None else choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index: int, end: str = ""):
        """Write choice `index`, colored green where the terminal supports it."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print choice `index`, prefixed with the arrow when it is selected."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the highlight `num_spaces` rows up or down, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        """Confirm the current choice and move the cursor below the menu."""
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    # Bug fix: the comprehension previously referenced an undefined placeholder
    # name; it must iterate `number` to register the digit keys 0-9.
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to the row whose digit was pressed."""
        # `self.current_selection` is set by the `input` dispatch framework —
        # presumably the last key code received; confirm against `input.register`.
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Render the menu, loop on input, and return the selected index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 101 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __lowercase(PipelineTool):
    """Agent tool that reads English text out loud with SpeechT5 + a HiFiGAN vocoder."""

    # Attribute names restored: PipelineTool reads these class attributes to
    # instantiate the processor/model/vocoder.
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        """Default the vocoder checkpoint before the base class loads everything."""
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        """Tokenize `text`; fall back to a stock x-vector when no speaker embedding is given."""
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        """Generate the speech spectrogram/waveform without tracking gradients."""
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        """Vocode the model output to a CPU waveform tensor."""
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 101 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: the configuration is always importable.
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling objects (previously this
    # list overwrote the dict instead of being added under its module key).
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so heavy submodules load only on first attribute access
    # (previously the _LazyModule was assigned to a throwaway variable).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 701 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch the authenticated user's GitHub profile via the REST v3 API.

    Args:
        auth_token: A GitHub personal access token.

    Returns:
        The decoded JSON response as a dict.
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    # Query the /user endpoint (previously the token itself was passed as the URL).
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
# Entry point: print every field of the authenticated user's GitHub profile.
# NOTE(review): as written above, neither `USER_TOKEN` nor `fetch_github_info`
# is bound under these names (the assignments/def use placeholder names), so
# this guard raises NameError — confirm the intended definitions.
if __name__ == "__main__": # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"""{key}: {value}""")
    else:
        raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 201 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """
    Yield successive tuples of at most `size` items from `seq`.

    The final tuple may be shorter when len(seq) is not a multiple of `size`.
    (The previous copy gave both parameters the same placeholder name, which is
    a SyntaxError in Python.)
    """
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    """
    Prepare plaintext for Playfair encoding.

    Keeps ASCII letters only (uppercased), inserts 'X' between doubled letters,
    and pads with a trailing 'X' so the cleaned length is even.
    """
    dirty = "".join(c.upper() for c in dirty if c in string.ascii_letters)
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    # Pad based on the length of the CLEANED string (the X insertions above can
    # change its parity relative to the filtered input).
    if len(clean) & 1:
        clean += "X"

    return clean
def generate_table(key: str) -> list[str]:
    """
    Build the 5x5 Playfair key table (as a flat 25-element list) for `key`.

    I and J are used interchangeably to allow us to use a 5x5 table (25 letters).
    """
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
def encode(plaintext: str, key: str) -> str:
    """
    Encrypt `plaintext` with the Playfair cipher under `key`.

    (Restored distinct names: the previous copy reused one placeholder for both
    parameters and both loop targets, destroying the digram logic.)
    """
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the right (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    """
    Decrypt Playfair `ciphertext` under `key` (inverse of `encode`).
    """
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the left (wrapping)
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # same column: take the letter above (wrapping)
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
| 363 | """simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __UpperCAmelCase(unittest.TestCase, ToolTesterMixin):
    """Tests for the local and remote text-classification tools."""

    def setUp(self):
        # Build the local tool and a remote-endpoint variant before each test.
        self.tool = load_tool("""text-classification""")
        self.tool.setup()
        self.remote_tool = load_tool("""text-classification""", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("""That's quite cool""", ["""positive""", """negative"""])
        self.assertEqual(result, """positive""")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("""That's quite cool""", ["""positive""", """negative"""])
        self.assertEqual(result, """positive""")

    def test_exact_match_kwarg(self):
        result = self.tool(text="""That's quite cool""", labels=["""positive""", """negative"""])
        self.assertEqual(result, """positive""")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="""That's quite cool""", labels=["""positive""", """negative"""])
        self.assertEqual(result, """positive""")
| 363 | 1 |
"""simple docstring"""
def least_divisible_repunit(divisor: int) -> int:
    """Return the smallest k such that the repunit R(k) = 111...1 (k ones)
    is divisible by ``divisor``, or 0 when no repunit is (i.e. when
    ``divisor`` shares a factor with 10).
    """
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1  # R(k) mod divisor
    repunit_index = 1
    while repunit:
        # R(k + 1) = 10 * R(k) + 1, reduced mod divisor
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Project Euler 129: least n (coprime to 10) with A(n) > ``limit``,
    where A(n) is the length of the smallest repunit divisible by n.
    """
    divisor = limit - 1
    # only odd divisors can divide a repunit, so step over the evens
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
# Entry point: prints the Project Euler 129 answer (slow for the default limit).
if __name__ == "__main__":
    print(f'{solution() = }')
| 719 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Maps a pip-style comparison token (e.g. ">=") to its operator implementation;
# consulted by the version-requirement checks below.
A_ : dict ={
    """<""": operator.lt,
    """<=""": operator.le,
    """==""": operator.eq,
    """!=""": operator.ne,
    """>=""": operator.ge,
    """>""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare an installed version against a wanted one, raising on mismatch.

    Args:
        op: comparison token; must be a key of the module-level ``A_`` table.
        got_ver: version string actually installed.
        want_ver: version string required.
        requirement: full original requirement spec (for error messages).
        pkg: package name (for error messages).
        hint: extra text appended to error messages.

    Raises:
        ValueError: when either version is unknown.
        ImportError: when the installed version does not satisfy the requirement.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
            f' reinstalling {pkg}.' )
    # A_ is the module-level operator table defined above.
    if not A_[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}' )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Check that a pip-style requirement (e.g. ``tokenizers==0.9.4``) is met
    by the installed distribution, raising otherwise.

    The special package name ``python`` checks the running interpreter itself.

    Args:
        requirement: pip-style requirement string; may carry several
            comma-separated version specifiers.
        hint: suggestion appended to any error message.

    Raises:
        ValueError: on a malformed requirement string.
        ImportError: when the package is missing or the version check fails.
    """
    hint = f'\n{hint}' if hint is not None else ''

    # non-versioned check
    if re.match(r'^[\w_\-\d]+$', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'
                f' got {requirement}' )
        pkg, want_full = match[0]
        want_range = want_full.split(',')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r'^([\s!=<>]{1,2})(.+)', w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'
                    f' but got {requirement}' )
            op, want_ver = match[0]
            wanted[op] = want_ver
            # A_ is the module-level operator table.
            if op not in A_:
                raise ValueError(f'{requirement}: need one of {list(A_.keys() )}, but got {op}' )

    # special case: check the interpreter version itself
    if pkg == "python":
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f'The \'{requirement}\' distribution was not found and is required by this application. {hint}' )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement: str):
    """``require_version`` wrapper whose hint targets core dependencies."""
    hint = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
    return require_version(requirement, hint)
| 222 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase(TaskTemplate):
    """Task template for language-modeling datasets: a single string text column."""

    # Template identifier; kept in asdict() output even when left at its default.
    task: str = field(default='language-modeling', metadata={'include_in_asdict_even_if_is_default': True})
    # Expected input features: one string column named "text".
    input_schema: ClassVar[Features] = Features({'text': Value('string')})
    # Language modeling has no separate label column.
    label_schema: ClassVar[Features] = Features({})
    # Name of the dataset column holding the text.
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's text column to the canonical "text" feature name."""
        return {self.text_column: "text"}
| 683 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Unit tests for the Vector/Matrix linear-algebra library in ``.lib``."""

    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # empty construction must not raise

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5)))
# Run the linear-algebra test-suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 592 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of ``VQModel.encode``."""

    # Encoded latents, before vector quantization.
    latents: torch.FloatTensor
class __lowercase(ModelMixin, ConfigMixin):
    """VQ-VAE model: Encoder -> vector quantizer -> Decoder."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode images into (pre-quantization) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize latents (unless ``force_not_quantize``) and decode to images."""
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        # the "spatial" norm variant conditions the decoder on the quantized latents
        dec = self.decoder(quant, quant if self.config.norm_type == """spatial""" else None)

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencoding pass: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 199 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x: int) -> int:
    """Helper registered with the interpreter under the name ``add_two`` in the tests below."""
    return x + 2
class __lowercase(unittest.TestCase):
    """Tests for the restricted Python interpreter's ``evaluate`` function."""

    def test_evaluate_assign(self):
        code = """x = 3"""
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"""x""": 3})

        code = """x = y"""
        state = {"""y""": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"""x""": 5, """y""": 5})

    def test_evaluate_call(self):
        code = """y = add_two(x)"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"""x""": 3, """y""": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = """x = 3"""
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"""x""": 3})

    def test_evaluate_dict(self):
        code = """test_dict = {'x': x, 'y': add_two(x)}"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": add_two}, state=state)
        self.assertDictEqual(result, {"""x""": 3, """y""": 5})
        self.assertDictEqual(state, {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})

    def test_evaluate_expression(self):
        code = """x = 3\ny = 5"""
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"""x""": 3, """y""": 5})

    def test_evaluate_f_string(self):
        code = """text = f'This is x: {x}.'"""
        state = {"""x""": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"""x""": 3, """text""": """This is x: 3."""})

    def test_evaluate_if(self):
        code = """if x <= 3:\n y = 2\nelse:\n y = 5"""
        state = {"""x""": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"""x""": 3, """y""": 2})

        state = {"""x""": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"""x""": 8, """y""": 5})

    def test_evaluate_list(self):
        code = """test_list = [x, add_two(x)]"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"""x""": 3, """test_list""": [3, 5]})

    def test_evaluate_name(self):
        code = """y = x"""
        state = {"""x""": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"""x""": 3, """y""": 3})

    def test_evaluate_subscript(self):
        code = """test_list = [x, add_two(x)]\ntest_list[1]"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"""x""": 3, """test_list""": [3, 5]})

        code = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
        state = {"""x""": 3}
        result = evaluate(code, {"""add_two""": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})

    def test_evaluate_for(self):
        code = """x = 0\nfor i in range(3):\n x = i"""
        state = {}
        result = evaluate(code, {"""range""": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"""x""": 2, """i""": 2})
| 199 | 1 |
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph as an adjacency-list dict keyed 0..n-1.

    Args:
        vertices_number: number of vertices.
        probability: chance of creating each possible edge; >= 1 yields a
            complete graph, <= 0 a graph with no edges.
        directed: when False, every edge is mirrored (undirected graph).
    """
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, either
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    """Return the complete graph on ``vertices_number`` vertices as an adjacency dict."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
    # Execute the embedded doctests when run as a script.
    import doctest

    doctest.testmod()
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class snake_case_(PretrainedConfig):
    """Configuration for the EfficientNet model family.

    Defaults correspond to the efficientnet-b7 architecture.
    """

    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: list = [3, 3, 5, 3, 5, 5, 3],
        in_channels: list = [32, 16, 24, 40, 80, 112, 192],
        out_channels: list = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: list = [],
        strides: list = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: list = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: list = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        # NOTE(review): mutable list defaults follow the upstream configuration
        # convention and are treated as read-only.
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # each block repeat expands into 4 hidden layers
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """ONNX export configuration for EfficientNet checkpoints."""

    # minimum torch version known to export this architecture correctly
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> "OrderedDict":
        # EfficientNet consumes a single NCHW image tensor.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # tolerance when validating exported ONNX outputs against PyTorch
        return 1e-5
# Doomsday-algorithm month anchors: leap years (Jan/Feb differ from non-leap).
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# Month anchors for non-leap years.
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
# Week-day number (0 = Sunday) to English name.
WEEK_DAY_NAMES = {
    0: """Sunday""",
    1: """Monday""",
    2: """Tuesday""",
    3: """Wednesday""",
    4: """Thursday""",
    5: """Friday""",
    6: """Saturday""",
}


def a(year: int, month: int, day: int) -> str:
    """Return the English week-day name for a Gregorian date, using
    Conway's Doomsday algorithm.
    """
    # NOTE(review): assert-based validation is stripped under `python -O`;
    # kept to preserve the original behavior.
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # week-day of this year's doomsday
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # month anchor from the leap/non-leap table
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
# Execute the embedded doctests when run as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 719 |
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort ``array`` in place using pigeonhole sort and return it.

    Suitable when the value range (max - min) is not much larger than the
    number of elements.
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables: one pigeonhole per possible value.
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting: drop every element into its hole, counting duplicates.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Prompt for input only when run as a script, not on import.
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 643 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece word-start marker used in expected tokenizations below.
SPIECE_UNDERLINE = '''▁'''
# Path to the fixture SentencePiece model used by the tests.
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowercase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for BertGenerationTokenizer."""

    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Save a fixture tokenizer so the mixin can reload it from disk.
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = '<s>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], '<pad>')
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '9',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                'é',
                '.',
            ], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
                SPIECE_UNDERLINE + 'I',
                SPIECE_UNDERLINE + 'was',
                SPIECE_UNDERLINE + 'b',
                'or',
                'n',
                SPIECE_UNDERLINE + 'in',
                SPIECE_UNDERLINE + '',
                '<unk>',
                '2',
                '0',
                '0',
                '0',
                ',',
                SPIECE_UNDERLINE + 'and',
                SPIECE_UNDERLINE + 'this',
                SPIECE_UNDERLINE + 'is',
                SPIECE_UNDERLINE + 'f',
                'al',
                's',
                '<unk>',
                '.',
            ], )

    @cached_property
    def big_tokenizer(self):
        # Full pretrained tokenizer; only used by the @slow tests.
        return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18_536, 2_260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938,
            5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557,
            427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362,
            12_597, 3_200, 3_129, 1_172,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + ' ' + sequence], return_tensors='pt', return_token_type_ids=False)

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            'input_ids': [
                [39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114],
                [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114] + [0] * 63,
                [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114] + [0] * 84,
            ],
            'attention_mask': [
                [1] * 94,
                [1] * 31 + [0] * 63,
                [1] * 10 + [0] * 84,
            ],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name='google/bert_for_seq_generation_L-24_bbc_encoder',
            revision='c817d1fd1be2ffa69431227a1fe320544943d4db', )
| 7 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__A = logging.get_logger(__name__)
__A = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration for ImageGPT models (defaults match the "small" checkpoint)."""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_12 + 1,  # 512 color-cluster tokens + 1 start-of-sequence token
        n_positions=32 * 32,
        n_embd=5_12,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    """ONNX export configuration for ImageGPT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # ImageGPT consumes a sequence of color-cluster token ids.
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
            ] )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Build dummy inputs by generating random images and running them
        through ``preprocessor``.
        """
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 484 | 0 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger plus the map of EnCodec checkpoint names to config URLs.
# NOTE(review): obfuscation renamed both constants to the same
# `_SCREAMING_SNAKE_CASE`, so the dict assignment overwrites the logger binding.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class _lowerCAmelCase ( PretrainedConfig ):
    """Configuration class for an EnCodec-style neural audio codec.

    NOTE(review): reconstructed — the obfuscated original assigned every
    ``__init__`` argument to a throwaway local (``snake_case = X``) instead of
    ``self.X``, inherited from the dangling name ``A__`` (this module imports
    ``PretrainedConfig``, the evident base), and gave all four properties one
    colliding name. Property names below are pinned by the internal references
    ``self.chunk_length`` and ``self.frame_rate``.
    """

    model_type = "encodec"

    def __init__(
        self,
        # Mutable list defaults kept deliberately: they mirror the upstream
        # configuration defaults and are never mutated in place.
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Fall back to the model width when no explicit codebook dim is given.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
        super().__init__(**kwargs)

    @property
    def chunk_length(self):
        """Number of audio samples per chunk, or None for unchunked processing."""
        if self.chunk_length_s is None:
            return None
        return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        """Stride between chunks in samples (derived from overlap), or None."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self):
        """Encoder frame rate: sampling rate divided by the total hop length."""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self):
        """Number of residual quantizers needed for the largest target bandwidth."""
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 709 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( __lowerCAmelCase : list ) -> list:
if len(__lowerCAmelCase ) == 0:
return []
snake_case , snake_case = min(__lowerCAmelCase ), max(__lowerCAmelCase )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(__lowerCAmelCase )]
for i in my_list:
buckets[int(i - min_value )].append(__lowerCAmelCase )
return [v for bucket in buckets for v in sorted(__lowerCAmelCase )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 517 | 0 |
"""simple docstring"""
def __UpperCAmelCase ( text1: str, text2: str ) -> str:
    """Return the longest common substring of *text1* and *text2*.

    Classic O(len1*len2) dynamic program: dp[i][j] is the length of the common
    suffix of text1[:i] and text2[:j]; the maximum entry locates the answer.
    Raises ValueError when either input is not a string.

    Fixes (review): the obfuscated original declared both parameters with the
    same name (a syntax error) and compared one string against itself.
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError('longest_common_substring() takes two strings for inputs')

    len1 = len(text1)
    len2 = len(text2)
    dp = [[0] * (len2 + 1) for _ in range(len1 + 1)]
    ans_index = 0  # end index (exclusive) of the best match within text1
    ans_length = 0  # length of the best match found so far
    for i in range(1, len1 + 1):
        for j in range(1, len2 + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]


# Conventional public name (also the name used in the error message above).
longest_common_substring = __UpperCAmelCase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 76 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def __UpperCAmelCase (
    timesteps,
    embedding_dim,
    freq_shift = 1,
    min_timescale = 1,
    max_timescale = 1.0e4,
    flip_sin_to_cos = False,
    scale = 1.0,
):
    """Return sinusoidal timestep embeddings of shape (len(timesteps), embedding_dim).

    NOTE(review): parameter names reconstructed — the obfuscated original gave
    all seven parameters the same name (a syntax error); names and defaults
    follow how each value is used in the body below.

    Args:
        timesteps: 1-D array of timestep values.
        embedding_dim: output width; must be even (half sin, half cos).
        freq_shift: shift applied to the timescale-count denominator.
        min_timescale / max_timescale: geometric frequency range.
        flip_sin_to_cos: emit [cos, sin] instead of [sin, cos].
        scale: multiplier applied to the raw angles before sin/cos.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(signal)[0], embedding_dim])
    return signal


# Public alias: the FlaxTimesteps module below calls this helper by its
# conventional name, which the obfuscation left dangling.
get_sinusoidal_embeddings = __UpperCAmelCase
class UpperCAmelCase_ ( nn.Module ):
    """Timestep-embedding MLP: Dense -> silu -> Dense.

    NOTE(review): reconstructed — the obfuscated original collapsed both
    dataclass fields onto one name and applied every stage to the raw input
    instead of chaining; field names are pinned by the body's
    ``self.time_embed_dim`` / ``self.dtype`` reads.
    """

    # Flax module hyper-parameters (dataclass fields).
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_1')(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name='linear_2')(temb)
        return temb
class UpperCAmelCase_ ( nn.Module ):
    """Flax module wrapping the sinusoidal timestep-embedding helper.

    NOTE(review): field names reconstructed — the obfuscated original collapsed
    all three dataclass fields onto one name; the body reads ``self.dim``,
    ``self.flip_sin_to_cos`` and ``self.freq_shift``, which fixes them.
    ``get_sinusoidal_embeddings`` must resolve at module level (it is the
    sinusoidal-embedding helper defined above in this file).
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
| 76 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
# NOTE(review): every module-level constant below was renamed to the shared
# name `__lowerCAmelCase` by obfuscation (logger, doc-config name, doc
# checkpoint, expected output shape, classification checkpoint, expected
# label, archive list), so each assignment overwrites the previous one.
__lowerCAmelCase = logging.get_logger(__name__)

# General docstring
__lowerCAmelCase = '''ResNetConfig'''

# Base docstring
__lowerCAmelCase = '''microsoft/resnet-50'''
__lowerCAmelCase = [1, 20_48, 7, 7]

# Image classification docstring
__lowerCAmelCase = '''microsoft/resnet-50'''
__lowerCAmelCase = '''tiger cat'''

__lowerCAmelCase = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class __a ( nn.Module ):
    """Conv2d -> BatchNorm2d -> activation building block.

    NOTE(review): reconstructed — the obfuscated original assigned the
    submodules to throwaway locals (the forward pass reads
    ``self.convolution`` / ``self.normalization`` / ``self.activation``)
    and mangled ``nn.Conv2d`` / ``nn.BatchNorm2d`` into nonexistent names.
    """

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # kernel_size // 2 gives "same" padding for odd kernel sizes.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        # `activation=None` disables the non-linearity.
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name used by the rest of this file.
ResNetConvLayer = __a
class __a ( nn.Module ):
    """ResNet stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max-pool.

    NOTE(review): reconstructed — the obfuscated original assigned the
    submodules to throwaway locals while the forward pass reads
    ``self.embedder`` / ``self.pooler`` / ``self.num_channels``.
    """

    def __init__(self, config):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Kept for the channel-count sanity check in forward().
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name used by the rest of this file.
ResNetEmbeddings = __a
class __a ( nn.Module ):
    """Residual shortcut: 1x1 strided convolution + batch norm.

    Projects the residual branch to the right channel count / spatial size.
    NOTE(review): reconstructed — the obfuscated original assigned the
    submodules to throwaway locals while the forward pass reads
    ``self.convolution`` / ``self.normalization``.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name used by the rest of this file.
ResNetShortCut = __a
class __a ( nn.Module ):
    """Classic two-convolution residual block (ResNet "basic" layer).

    NOTE(review): reconstructed — the obfuscated original assigned
    ``self.shortcut`` / ``self.layer`` / ``self.activation`` to throwaway
    locals, which the forward pass reads.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        # Project the residual only when spatial size or channel count changes.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            # Last conv has no activation: it is applied after the residual add.
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name used by the rest of this file.
ResNetBasicLayer = __a
class __a ( nn.Module ):
    """Bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand.

    NOTE(review): reconstructed — the obfuscated original assigned
    ``self.shortcut`` / ``self.layer`` / ``self.activation`` to throwaway
    locals, which the forward pass reads.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        # Channel count inside the bottleneck.
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            # Last conv has no activation: it is applied after the residual add.
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name used by the rest of this file.
ResNetBottleNeckLayer = __a
class __a ( nn.Module ):
    """One ResNet stage: `depth` residual layers; only the first may downsample.

    NOTE(review): reconstructed — the obfuscated original assigned the layer
    stack to a throwaway local while the forward pass iterates ``self.layers``.
    """

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name used by the rest of this file.
ResNetStage = __a
class __a ( nn.Module ):
    """Stack of ResNet stages; optionally collects per-stage hidden states.

    NOTE(review): reconstructed — the obfuscated original assigned
    ``self.stages`` and the forward-pass accumulators to throwaway locals
    and zipped the wrong operands when pairing channel sizes with depths.
    """

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        """Run all stages; returns a BaseModelOutputWithNoAttention or a tuple."""
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name used by the rest of this file.
ResNetEncoder = __a
class __a ( PreTrainedModel ):
    """Base class wiring the ResNet models into the pretrained-model API.

    NOTE(review): reconstructed — the obfuscated original collapsed the four
    class attributes onto one name, inherited from the dangling
    ``__UpperCamelCase`` (this module imports ``PreTrainedModel``, the evident
    base), named both hook methods identically, and used duplicate parameter
    names (a syntax error).
    """

    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Kaiming-init convolutions; unit-weight, zero-bias norm layers."""
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        """Toggle gradient checkpointing on encoder modules."""
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value

    # Backward-compatible alias for the obfuscated original method name
    # (both hooks shared it; the second binding is the one that survived).
    SCREAMING_SNAKE_CASE__ = _set_gradient_checkpointing


# Restore the conventional name, and rebind the dangling base-class name used
# by the model classes below so they inherit the pretrained-model machinery.
ResNetPreTrainedModel = __a
__UpperCamelCase = __a
__lowerCAmelCase = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__lowerCAmelCase = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.' , __UpperCamelCase , )
class __a ( __UpperCamelCase ):
    """Bare ResNet backbone: embeddings -> encoder -> adaptive average pool.

    NOTE(review): reconstructed — the obfuscated original assigned every
    submodule to a throwaway local (the forward pass reads ``self.embedder``
    etc.). The method-level doc decorators were dropped because their
    arguments were destroyed by the obfuscation (dangling names).
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name used by the classification head below.
ResNetModel = __a
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , __UpperCamelCase , )
class __a ( __UpperCamelCase ):
    """ResNet with a linear classification head on the pooled features.

    NOTE(review): reconstructed — the obfuscated original assigned every
    submodule/intermediate to throwaway locals and set ``problem_type`` on a
    local instead of ``self.config``. Method-level doc decorators dropped
    (their arguments were destroyed by the obfuscation).
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        """Classify *pixel_values*; when *labels* is given, also compute a loss
        whose kind (regression / single- / multi-label) follows
        ``config.problem_type``, inferring it from labels on first use."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name.
ResNetForImageClassification = __a
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ' , __UpperCamelCase , )
class __a ( __UpperCamelCase , BackboneMixin ):
    """ResNet backbone exposing per-stage feature maps for detection heads.

    NOTE(review): reconstructed — the obfuscated original assigned every
    submodule/intermediate to throwaway locals and listed the same dangling
    name twice as bases; ``BackboneMixin`` (imported above) is the evident
    second base. Method-level doc decorators dropped (arguments destroyed
    by the obfuscation).
    """

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        # Channel count of the stem plus each stage, indexed by stage.
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        """Return the feature maps of the stages listed in ``self.out_features``."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        # Always request hidden states internally: they are needed to select
        # the per-stage feature maps below.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )

    # Backward-compatible alias for the obfuscated original method name.
    SCREAMING_SNAKE_CASE__ = forward


# Restore the conventional name.
ResNetBackbone = __a
| 335 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# NOTE(review): every module-level constant below was renamed to the shared
# name `__lowerCAmelCase` by obfuscation; judging by the functions that follow
# they were (in order) `_lock`, `_default_handler`, `log_levels`,
# `_default_log_level` and `_tqdm_active`. Only the final assignment survives
# under the shared name, so the helpers below reference dangling globals.
__lowerCAmelCase = threading.Lock()
__lowerCAmelCase = None
__lowerCAmelCase = {
    '''debug''': logging.DEBUG,
    '''info''': logging.INFO,
    '''warning''': logging.WARNING,
    '''error''': logging.ERROR,
    '''critical''': logging.CRITICAL,
}
__lowerCAmelCase = logging.WARNING
__lowerCAmelCase = True
# NOTE(review): obfuscation renamed every helper in this module to
# `snake_case_` (later defs shadow earlier ones) and collapsed the original
# locals/globals into `lowercase__` and the bare name `snake_case`, leaving
# many dangling references (`env_level_str`, `log_levels`,
# `_default_log_level`, `_get_library_name`, `_default_handler`,
# `library_root_logger`, `_lock`). Code is kept byte-for-byte; the comments
# record the evident intended behavior for restoration.


def snake_case_ ( ) -> Optional[Any]:
    # Intended: map the TRANSFORMERS_VERBOSITY env var through `log_levels`,
    # warning and falling back to the default level on unknown values.
    lowercase__: Optional[int] = os.getenv('TRANSFORMERS_VERBOSITY' , snake_case )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '
                f'has to be one of: { ", ".join(log_levels.keys() ) }' )
    return _default_log_level


def snake_case_ ( ) -> str:
    # Intended: the library name — the first component of `__name__`.
    return __name__.split('.' )[0]


def snake_case_ ( ) -> logging.Logger:
    # Intended: the library's root logger.
    return logging.getLogger(_get_library_name() )


def snake_case_ ( ) -> None:
    # Intended: lazily install a StreamHandler on the library root logger,
    # exactly once, under the module lock.
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        lowercase__: str = logging.StreamHandler()  # Set sys.stderr as stream.
        lowercase__: Optional[Any] = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        lowercase__: Any = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        lowercase__: Union[str, Any] = False


def snake_case_ ( ) -> None:
    # Intended: tear down the default handler installed above.
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        lowercase__: Optional[Any] = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        lowercase__: List[str] = None


def snake_case_ ( ) -> Union[str, Any]:
    # Intended: expose the verbosity-name -> logging-level mapping.
    return log_levels
# NOTE(review): same obfuscation damage as above — every function is named
# `snake_case_`, parameters and globals collapsed to `snake_case`/`lowercase__`,
# and the two trailing assignments reference the dangling names
# `warning_advice` / `warning_once`. The two `(*snake_case, **snake_case)`
# signatures are duplicate-parameter syntax errors. Code kept byte-for-byte.


def snake_case_ ( snake_case = None ) -> logging.Logger:
    # Intended: get a logger, defaulting the name to the library name.
    if name is None:
        lowercase__: Optional[Any] = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(snake_case )


def snake_case_ ( ) -> int:
    # Intended: current effective verbosity of the library root logger.
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def snake_case_ ( snake_case ) -> None:
    # Intended: set the library root logger's verbosity.
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(snake_case )


def snake_case_ ( ) -> List[str]:
    # Intended: set_verbosity(INFO).
    return set_verbosity(snake_case )


def snake_case_ ( ) -> List[Any]:
    # Intended: set_verbosity(WARNING).
    return set_verbosity(snake_case )


def snake_case_ ( ) -> Union[str, Any]:
    # Intended: set_verbosity(DEBUG).
    return set_verbosity(snake_case )


def snake_case_ ( ) -> Any:
    # Intended: set_verbosity(ERROR).
    return set_verbosity(snake_case )


def snake_case_ ( ) -> None:
    # Intended: detach the default handler from the library root logger.
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )


def snake_case_ ( ) -> None:
    # Intended: re-attach the default handler.
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )


def snake_case_ ( snake_case ) -> None:
    # Intended: add a caller-supplied handler to the library root logger.
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(snake_case )


def snake_case_ ( snake_case ) -> None:
    # Intended: remove a caller-supplied handler.
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(snake_case )


def snake_case_ ( ) -> None:
    # Intended: stop library log records propagating to the root logger.
    _configure_library_root_logger()
    lowercase__: Optional[Any] = False


def snake_case_ ( ) -> None:
    # Intended: re-enable propagation to the root logger.
    _configure_library_root_logger()
    lowercase__: Optional[int] = True


def snake_case_ ( ) -> None:
    # Intended: install an explicit, detailed formatter on every handler.
    lowercase__: str = _get_library_root_logger().handlers
    for handler in handlers:
        lowercase__: Any = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' )
        handler.setFormatter(snake_case )


def snake_case_ ( ) -> None:
    # Intended: reset every handler to the default (None) formatter.
    lowercase__: Optional[int] = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(snake_case )


def snake_case_ ( self , *snake_case , **snake_case ) -> Union[str, Any]:
    # Intended: `warning_advice` — a logger method that suppresses advisory
    # warnings when TRANSFORMERS_NO_ADVISORY_WARNINGS is set.
    lowercase__: Any = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , snake_case )
    if no_advisory_warnings:
        return
    self.warning(*snake_case , **snake_case )


__lowerCAmelCase = warning_advice


@functools.lru_cache(snake_case )
def snake_case_ ( self , *snake_case , **snake_case ) -> Any:
    # Intended: `warning_once` — emit each distinct warning only once per
    # process via lru_cache.
    self.warning(*snake_case , **snake_case )


__lowerCAmelCase = warning_once
class __a :
    """Drop-in no-op replacement for `tqdm.tqdm`, used when progress bars are
    disabled: it iterates its first argument (if any) and silently swallows
    every tqdm method call and context-manager use."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Keep only the iterable so `for x in EmptyTqdm(it)` still works.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _name):
        """Return a no-op function for any tqdm method (update, close, ...)."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


# Restore the name used by `_tqdm_cls.__call__` below, which the
# obfuscation left dangling.
EmptyTqdm = __a
class __a :
    """Callable factory: returns a real `tqdm.tqdm` when progress bars are
    enabled (module flag `_tqdm_active`), otherwise an `EmptyTqdm` stub.

    NOTE(review): both lock helpers below kept the obfuscated original's
    colliding method name; only the second binding survives, exactly as in
    the original.
    """

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def SCREAMING_SNAKE_CASE__(self, *args, **kwargs):
        """set_lock: forward to tqdm when active; always clear the local lock."""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def SCREAMING_SNAKE_CASE__(self):
        """get_lock: forward to tqdm when active."""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


# Restore the name called at module level just below, which the obfuscation
# left dangling.
_tqdm_cls = __a
# Module-level tqdm factory instance.
__lowerCAmelCase = _tqdm_cls()

# NOTE(review): obfuscation collapsed the original `_tqdm_active` flag into the
# shared `__lowerCAmelCase` constant name; re-establish it here so the three
# helpers below operate on a real module global.
_tqdm_active = True


def snake_case_ ( ) -> bool:
    """Return True when tqdm progress bars are globally enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def snake_case_ ( ) -> Union[str, Any]:
    """Enable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def snake_case_ ( ) -> int:
    """Disable tqdm progress bars here and in huggingface_hub."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 335 | 1 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def lowerCamelCase__ ( _lowerCamelCase ):  # picklable for multiprocessing
    """Return `.sum()` of the argument (e.g. a NumPy array).

    Defined at module level so it can be pickled for multiprocessing.
    Fix (review): the obfuscated original returned `x.sum()` where `x` was
    undefined; the broken return annotation was dropped (typing is not
    imported in this module).
    """
    return _lowerCamelCase.sum()
def lowerCamelCase__ ( _lowerCamelCase ):  # picklable for multiprocessing
    """Return the argument plus one.

    Defined at module level so it can be pickled for multiprocessing.
    Fix (review): the obfuscated original returned `i + 1` where `i` was
    undefined; the broken return annotation was dropped (typing is not
    imported in this module).
    """
    return _lowerCamelCase + 1
@dataclass
class _a :
    """Simple record used by the pickling tests.

    NOTE(review): obfuscation renamed both original fields to the single name
    ``snake_case`` and stripped their type annotations, so the second
    assignment overwrites the first and ``@dataclass`` sees no annotated
    fields at all — this is no longer a functioning two-field dataclass.
    """

    # Both class attributes collapsed onto one un-annotated name.
    snake_case =4_2
    snake_case =4_2
class _a ( a__ ):
    """Unit tests for py_utils helpers (map_nested, zip_dict, temporary_assignment).

    NOTE(review): obfuscation damage — the base ``a__`` is a dangling name
    (presumably ``TestCase``), every assertion argument was collapsed to
    ``__a`` (which name-mangles to an undefined ``_a__a`` inside this class),
    and ``Foo`` in the last test is dangling (the inner class was renamed to
    ``_a``). Code is kept byte-for-byte.
    """

    def SCREAMING_SNAKE_CASE ( self ):
        # Intended: exercise map_nested over scalars, lists, dicts and nested
        # dicts, sequentially and with num_proc=2, plus the map_numpy paths.
        _UpperCAmelCase ={}
        _UpperCAmelCase =[]
        _UpperCAmelCase =1
        _UpperCAmelCase =[1, 2]
        _UpperCAmelCase ={"""a""": 1, """b""": 2}
        _UpperCAmelCase ={"""a""": [1, 2], """b""": [3, 4]}
        _UpperCAmelCase ={"""a""": {"""1""": 1}, """b""": 2}
        _UpperCAmelCase ={"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
        _UpperCAmelCase ={}
        _UpperCAmelCase =[]
        _UpperCAmelCase =2
        _UpperCAmelCase =[2, 3]
        _UpperCAmelCase ={"""a""": 2, """b""": 3}
        _UpperCAmelCase ={"""a""": [2, 3], """b""": [4, 5]}
        _UpperCAmelCase ={"""a""": {"""1""": 2}, """b""": 3}
        _UpperCAmelCase ={"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
        self.assertEqual(map_nested(__a , __a ) , __a )
        self.assertEqual(map_nested(__a , __a ) , __a )
        self.assertEqual(map_nested(__a , __a ) , __a )
        self.assertEqual(map_nested(__a , __a ) , __a )
        self.assertEqual(map_nested(__a , __a ) , __a )
        self.assertEqual(map_nested(__a , __a ) , __a )
        self.assertEqual(map_nested(__a , __a ) , __a )
        self.assertEqual(map_nested(__a , __a ) , __a )
        _UpperCAmelCase =2
        self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
        self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
        self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
        self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
        self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
        self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
        self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
        self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
        _UpperCAmelCase ={"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
        _UpperCAmelCase ={"""a""": 2, """b""": 0, """c""": 2}
        _UpperCAmelCase ={
            """a""": np.eye(2 ).astype(__a ),
            """b""": np.zeros(3 ).astype(__a ),
            """c""": np.ones(2 ).astype(__a ),
        }
        self.assertEqual(map_nested(__a , __a , map_numpy=__a ) , __a )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(__a , __a , map_numpy=__a , num_proc=__a ) , __a )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a , num_proc=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(__a ):  # can't pickle a local lambda
            map_nested(lambda _snake_case : x + 1 , __a , num_proc=__a )

    def SCREAMING_SNAKE_CASE ( self ):
        # Intended: zip_dict over three dicts with identical key sets.
        _UpperCAmelCase ={"""a""": 1, """b""": 2}
        _UpperCAmelCase ={"""a""": 3, """b""": 4}
        _UpperCAmelCase ={"""a""": 5, """b""": 6}
        _UpperCAmelCase =sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(__a , __a , __a ) ) , __a )

    def SCREAMING_SNAKE_CASE ( self ):
        # Intended: temporary_assignment restores the attribute on exit.
        class _a :
            """Fixture with a single attribute (originally named Foo.my_attr)."""

            snake_case ="""bar"""

        _UpperCAmelCase =Foo()
        self.assertEqual(foo.my_attr , "bar" )
        with temporary_assignment(__a , "my_attr" , "BAR" ):
            self.assertEqual(foo.my_attr , "BAR" )
        self.assertEqual(foo.my_attr , "bar" )
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def lowerCamelCase__ ( iterable_length , num_proc , expected_num_proc ):
    """Check that map_nested dispatches to a single process or to a pool
    according to input size vs. `parallel_min_length` and `num_proc`.

    NOTE(review): reconstructed — the obfuscated original declared all three
    parameters with the same name (a syntax error) and referenced the
    undefined ``A__`` where the parametrized values belong; names follow the
    ``parametrize`` argument string above.
    """
    with patch("datasets.utils.py_utils._single_map_nested" ) as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool" ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class _a ( a__ ):
    """Checks that temp_seed makes TF / PyTorch / NumPy RNG draws reproducible
    inside the context manager while unseeded draws still differ.

    NOTE(review): the three test methods share the mangled name
    `SCREAMING_SNAKE_CASE`, so later definitions shadow earlier ones; names are
    kept to preserve the visible interface but should be restored upstream.
    The base class `a__` is not defined in this fragment — confirm it is the
    intended TestCase base.
    """

    @require_tf
    def SCREAMING_SNAKE_CASE( self ):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            inputs = tf.random.uniform((1, 3))
            return model(inputs).numpy()

        # Same seed twice -> identical outputs; an unseeded third draw differs.
        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def SCREAMING_SNAKE_CASE( self ):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            inputs = torch.rand(1, 3)
            return model(inputs).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def SCREAMING_SNAKE_CASE( self ):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def lowerCamelCase__(input_data) -> List[Any]:
    """NestedDataStructure must expose the wrapped object unchanged via .data."""
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
    "data, expected_output",
    [
        ({}, []),
        ([], []),
        ("foo", ["foo"]),
        (["foo", "bar"], ["foo", "bar"]),
        ([["foo", "bar"]], ["foo", "bar"]),
        ([[["foo"], ["bar"]]], ["foo", "bar"]),
        ([[["foo"], "bar"]], ["foo", "bar"]),
        ({"a": 1, "b": 2}, [1, 2]),
        ({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
        ({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
        ({"a": {"1": 1}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": 2}, [1, 2]),
        ({"a": {"1": [1]}, "b": [2]}, [1, 2]),
    ],
)
def lowerCamelCase__(data, expected_output) -> Optional[Any]:
    """NestedDataStructure.flatten should flatten arbitrarily nested lists/dicts."""
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def lowerCamelCase__() -> Optional[Any]:
    """asdict should recursively convert dataclass instances (also nested in
    dicts/lists) into plain dicts, and reject non-dataclass input."""
    result = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(result) == expected_output

    nested = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested) == expected_output

    # NOTE(review): the original passed an undefined `A__` to pytest.raises;
    # asdict on a non-dataclass conventionally raises TypeError — confirm.
    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def lowerCamelCase__ ( _lowerCamelCase ) ->Any:
return text.split()
def lowerCamelCase__ ( _lowerCamelCase ) ->List[Any]:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def lowerCamelCase__() -> List[str]:
    """iflatmap_unordered must flatten every worker's output, work with both
    multiprocessing and multiprocess (pathos) pools, and yield items eagerly
    rather than after the pool drains.

    NOTE(review): the original bound the pool/result lists to throwaway names
    and passed an undefined `A__`; reconstructed from the assertions.
    """
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20
    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _aseconds_generator_of_aitems_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 408 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : List[Any] = logging.get_logger(__name__)

# Maps each released DPR checkpoint name to the Hub URL of its config file.
# NOTE(review): both module constants were mangled to the same name
# `snake_case__`, so the logger binding is shadowed — confirm original names.
snake_case__ : str = {
    'facebook/dpr-ctx_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-single-nq-base': (
        'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-ctx_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-multiset-base': (
        'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
    ),
}
class SCREAMING_SNAKE_CASE_ (PretrainedConfig ):
    """Configuration for DPR encoders/readers: a BERT-style backbone plus an
    optional output projection (`projection_dim`).

    NOTE(review): the base class is restored to the `PretrainedConfig` imported
    at the top of this file (the original named an undefined `a__`), parameter
    names are restored from the attribute assignments, and the mangled
    duplicate `__a` parameters (a SyntaxError) are removed.
    """

    # Key used by the transformers auto-config machinery.
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no extra projection on top of the encoder.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 278 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase = logging.get_logger(__name__)

# Names of the serialized tokenizer files.
# NOTE(review): the four module constants below were all mangled to the same
# name `__lowerCamelCase`, so each assignment shadows the previous one —
# confirm the original constant names upstream.
__lowerCamelCase = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Hub location of each tokenizer file for the released checkpoint.
__lowerCamelCase = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

# Maximum model input length (in tokens) per checkpoint.
__lowerCamelCase = {"facebook/blenderbot-3B": 1_28}
class _lowercase ( __UpperCAmelCase ):
    # Fast (tokenizers-backed) Blenderbot tokenizer.
    #
    # NOTE(review): every method below carries the same mangled name
    # `lowerCAmelCase__`, so later definitions shadow earlier ones; the
    # `@mask_token.setter` decorator references a `mask_token` property that is
    # never bound under that name; and `__init__` declares duplicate parameter
    # names (a SyntaxError). This class cannot be instantiated as written —
    # restore the identifiers from the upstream BlenderbotTokenizerFast.
    _lowerCamelCase = VOCAB_FILES_NAMES
    _lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCamelCase = ['''input_ids''', '''attention_mask''']
    _lowerCamelCase = BlenderbotTokenizer

    def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="replace" , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ):
        super().__init__(
            UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , )
        # Rebuild the pre-tokenizer when the serialized add_prefix_space flag
        # differs from the requested one.
        # NOTE(review): results below are assigned to the throwaway name
        # `__magic_name__` while later lines read `pre_tok_state`,
        # `pre_tok_class`, etc. — the local names were mangled away.
        __magic_name__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space:
            __magic_name__ = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) )
            __magic_name__ = add_prefix_space
            __magic_name__ = pre_tok_class(**UpperCamelCase_ )
            __magic_name__ = add_prefix_space
        # Apply the same add_prefix_space / trim_offsets fixes to the
        # serialized post-processor component, if one exists.
        __magic_name__ = '''post_processor'''
        __magic_name__ = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )
        if tokenizer_component_instance:
            __magic_name__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                __magic_name__ = tuple(state['''sep'''] )
            if "cls" in state:
                __magic_name__ = tuple(state['''cls'''] )
            __magic_name__ = False
            if state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space:
                __magic_name__ = add_prefix_space
                __magic_name__ = True
            if state.get('''trim_offsets''' , UpperCamelCase_ ) != trim_offsets:
                __magic_name__ = trim_offsets
                __magic_name__ = True
            if changes_to_apply:
                __magic_name__ = getattr(UpperCamelCase_ , state.pop('''type''' ) )
                __magic_name__ = component_class(**UpperCamelCase_ )
                setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def lowerCAmelCase__ ( self ):
        # Returns the mask token string, or None (with an error log) if unset.
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        # Mask token is wrapped in an AddedToken so `<mask>` also matches after
        # a preceding space (lstrip).
        __magic_name__ = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else value
        __magic_name__ = value

    def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
        # Batch encoding entry point; pretokenized input needs add_prefix_space.
        __magic_name__ = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )

    def lowerCAmelCase__ ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
        # Single-example encoding entry point; same precondition as above.
        __magic_name__ = kwargs.get('''is_split_into_words''' , UpperCamelCase_ )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ )

    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
        # Persist the underlying tokenizer model files and return their paths.
        __magic_name__ = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
        return tuple(UpperCamelCase_ )

    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
        # Token-type ids are all zeros (Blenderbot has no segment embeddings).
        __magic_name__ = [self.sep_token_id]
        __magic_name__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
        # Blenderbot only appends EOS; a second sequence is ignored.
        return token_ids_a + [self.eos_token_id]

    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        # Flatten a Conversation into one space-joined string, truncating from
        # the left when it exceeds the model's maximum length.
        __magic_name__ = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(UpperCamelCase_ )
        __magic_name__ = ''' '''.join(UpperCamelCase_ )
        __magic_name__ = self.encode(UpperCamelCase_ )
        if len(UpperCamelCase_ ) > self.model_max_length:
            __magic_name__ = input_ids[-self.model_max_length :]
            logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
        return input_ids
| 190 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """A segment-tree node covering the inclusive index range [start, end].

    NOTE(review): both classes in this file were mangled to the same name
    `_lowercase` while the code below and the __main__ demo refer to
    `SegmentTreeNode` / `SegmentTree`; the original names are restored here.
    """

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over *collection*, combining values with the binary
    *function* (e.g. operator.add, max, min)."""

    def __init__(self, collection, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set collection[i] to *val* and refresh the affected aggregates."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return fn-aggregate of the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaves hold single elements; internal nodes combine their children.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child
                return self._query_range(node.left, i, j)
            # range straddles both children
            return self.fn(
                self._query_range(node.left, i, node.mid),
                self._query_range(node.right, node.mid + 1, j),
            )
        # range entirely in the right child
        return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield every node in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    # Demo: build the tree with several combiner functions, print it, update
    # one leaf, and show the resulting range queries.
    # NOTE(review): the original assigned the tree to a throwaway mangled name
    # while the loop body read `arr`; the binding is restored here.
    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 190 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__)

# Hub location of the pretrained CANINE config.
# NOTE(review): both constants were mangled to the same name, shadowing the
# logger binding — confirm the original names upstream.
lowerCamelCase__ : Any = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowercase__( PretrainedConfig ):
    """Configuration for CANINE (character-level transformer) models.

    NOTE(review): the base class is restored to the `PretrainedConfig` imported
    at the top of this file (the original named an undefined `_UpperCAmelCase`),
    parameter names are restored from the attribute assignments, and the
    mangled duplicate parameter names (a SyntaxError) are removed.
    """

    # Key used by the transformers auto-config machinery.
    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 698 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): the original bound the path to a throwaway mangled name while
# the insert below read `git_repo_path`; the binding is restored.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def __A ( a_ : Dict )-> str:
'''simple docstring'''
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __A ( a_ : Dict )-> Tuple:
    '''Delegate pytest option registration (e.g. --make-reports) to the shared
    transformers helper; imported lazily to keep conftest import cheap.'''
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(a_ )
def __A ( terminalreporter )-> List[Any]:
    """Emit the extended test reports when pytest was run with --make-reports.

    NOTE(review): the parameter was mangled to `a_` while the body read
    `terminalreporter`, and the report id argument was replaced by `a_`; both
    are restored from the surrounding reads.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def __A ( a_ : Dict , a_ : List[str] )-> Dict:
'''simple docstring'''
if exitstatus == 5:
SCREAMING_SNAKE_CASE : List[str] = 0
# Doctest custom flag to ignore output.
# NOTE(review): both values were bound to throwaway mangled names while later
# code reads IGNORE_RESULT / OutputChecker; the bindings are restored.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

# Keep a reference to the stock checker so the subclass below can delegate.
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(doctest.OutputChecker ):
    """OutputChecker that lets examples flagged with IGNORE_RESULT always pass.

    NOTE(review): the class name is restored from the reference on the next
    line of this file, the base from the doctest import above, and the method
    must be named `check_output` for doctest to call the override — the
    mangled names here meant the override was never used.
    """

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return doctest.OutputChecker.check_output(self, want, got, optionflags)
# Route doctest collection through the helpers imported at the top of this
# conftest: custom checker, HF doctest module, HF doctest parser.
# NOTE(review): reconstructed from the imported names; the original mangled
# assignments bound throwaway module variables and had no effect — confirm
# against the upstream transformers conftest.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 698 | 1 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

# Export the real UnCLIP pipelines only when torch and transformers>=4.25 are
# installed; otherwise fall back to importable dummy placeholders so that
# attribute access still works (and raises a helpful error on use).
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: heavy submodules are only imported on first access.
# NOTE(review): every assignment below was mangled to the single name
# `lowercase__`, so the per-submodule `_import_structure[...]` entries are
# lost and the final `_LazyModule(...)` call references an undefined
# `_import_structure` — restore the original variable names upstream.
lowercase__ = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

# Slow (sentencepiece) tokenizer is optional.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = ["LlamaTokenizer"]

# Fast (tokenizers) tokenizer is optional.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = ["LlamaTokenizerFast"]

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy at runtime.
    lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ (ProcessorMixin ):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into one
    processor: text goes to the tokenizer, images to the image processor, and
    both encodings are merged.

    NOTE(review): the base class is restored to the `ProcessorMixin` imported
    at the top of this file, the three class attributes to the names
    ProcessorMixin requires, and the mangled duplicate `_a` parameters (a
    SyntaxError) to names matching their visible type annotations.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize *text* and preprocess *images*, returning one BatchEncoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        # NOTE(review): upstream forces do_normalize/do_center_crop to True
        # here — confirm against the released BridgeTowerProcessor.
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving, deduplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 33 |
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in *s* at which *pattern* begins (naive O(n*m) scan).

    NOTE(review): the original declared two identically named parameters (a
    SyntaxError); the function name is restored from the __main__ calls below.
    """
    pat_len = len(pattern)
    positions = []
    # Compare a slice per candidate start instead of a char-by-char inner loop.
    for i in range(len(s) - pat_len + 1):
        if s[i : i + pat_len] == pattern:
            positions.append(i)
    return positions
if __name__ == "__main__":
    # Smoke test plus a demo run of the naive scan.
    assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
    print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
| 694 | 0 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# NOTE(review): all three module constants below were mangled to the same name
# `_snake_case` while the functions below read `ETAOIN` and `LETTERS` —
# restore the original constant names upstream.
_snake_case = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
# English letters ordered from most to least frequent.
_snake_case = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
# Plain A-Z alphabet.
_snake_case = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in *message* (case-insensitive).

    NOTE(review): name restored from the call in the frequency-order function
    below; the original bound the dict to a throwaway mangled name while the
    loop read `letter_count`.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def snake_case(x: tuple) -> str:
    """Return the first element of *x* (used as a sort key).

    NOTE(review): the parameter was mangled to `_a` while the body read `x`.
    """
    return x[0]
def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in *message*,
    breaking frequency ties by reverse ETAOIN position (classic cryptanalysis
    ordering).

    NOTE(review): name restored from the call in the match-score function
    below; the original appended the whole message instead of each letter and
    passed undefined sort arguments.
    """
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for freq in letter_to_freq.values()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    # Highest frequency groups first.
    freq_pairs = sorted(freq_to_letter_str.items(), key=lambda pair: pair[0], reverse=True)
    return "".join(pair[1] for pair in freq_pairs)
def snake_case(message: str) -> int:
    """Score (0-12) how English-like *message*'s letter-frequency order is:
    one point per ETAOIN top-6 letter found in the top 6 of the message's
    order, plus one per bottom-6 letter found in its bottom 6.

    NOTE(review): local names restored from the reads in the original body.
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 704 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

# Export the real Shap-E pipelines only when torch + transformers are present;
# otherwise fall back to an importable dummy placeholder.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
| 659 | 0 |
"""simple docstring"""
def solution(SCREAMING_SNAKE_CASE__: int = 1_000) -> int:
    """Project Euler problem 1: sum of all naturals below the limit that are
    divisible by 3 or 5.

    NOTE(review): function name restored from the __main__ print below; the
    mangled parameter name is kept since keyword callers may rely on it.
    """
    return sum(e for e in range(3, SCREAMING_SNAKE_CASE__) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
    # Print the Euler problem 1 answer for the default limit of 1000.
    print(F"{solution() = }")
| 289 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo") -> Tuple:
    """Download *class_info_file* from the Hub dataset repo and build the
    metadata dict used by the OneFormer tests: id -> class name, plus
    'thing_ids' and 'class_names' lists.

    NOTE(review): name restored from the call inside the tester class below;
    the original dropped every computed value into throwaway mangled names.
    """
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase ):
    """Holds the knobs and helpers used by the OneFormer image-processor tests.

    NOTE(review): the class name and the two helper method names are restored
    from the call sites visible in this file
    (`OneFormerImageProcessorTester(self)`, `self.get_expected_values(...)`,
    `...prepare_image_processor_dict()`); the original used mangled duplicate
    parameter names that are not valid Python.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions (note: deliberately overrides the
        # batch_size set above, matching the original assignment order)
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index

    def prepare_image_processor_dict(self):
        """Kwargs dict for constructing an OneFormerImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "num_labels": self.num_labels,
            "do_reduce_labels": self.do_reduce_labels,
            "ignore_index": self.ignore_index,
            "class_info_file": self.class_info_file,
            "metadata": self.metadata,
            "num_text": self.num_text,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should resize to: scale the
        shorter side to size['shortest_edge']; for batches, take the max over
        per-image expectations."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # assumes channels-first arrays/tensors: shape (c, h, w)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width

    def snake_case_(self):
        """Build fake OneFormer segmentation outputs sized from self.* dims."""
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class UpperCAmelCase_ ( _UpperCamelCase , unittest.TestCase ):
    """Unit tests for ``OneFormerImageProcessor`` (PIL / NumPy / PyTorch inputs,
    segmentation-map handling and the post-processing helpers).

    NOTE(review): like the helper class above, this code has been mechanically
    renamed — methods repeat the parameter name ``A`` (a SyntaxError) and
    bodies reference names that no longer exist (e.g. ``fature_extractor``
    below). Restore the original identifiers before running.
    """

    __SCREAMING_SNAKE_CASE : str = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    __SCREAMING_SNAKE_CASE : Tuple = image_processing_class

    def snake_case_ ( self : Tuple ):
        # Per-test fixture providing sizes, configs and fake outputs.
        _UpperCAmelCase : Optional[Any] = OneFormerImageProcessorTester(self )

    @property
    def snake_case_ ( self : Optional[Any] ):
        return self.image_processing_tester.prepare_image_processor_dict()

    def snake_case_ ( self : Union[str, Any] ):
        # The processor must expose every documented configuration attribute.
        _UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A , "image_mean" ) )
        self.assertTrue(hasattr(A , "image_std" ) )
        self.assertTrue(hasattr(A , "do_normalize" ) )
        self.assertTrue(hasattr(A , "do_resize" ) )
        self.assertTrue(hasattr(A , "size" ) )
        self.assertTrue(hasattr(A , "ignore_index" ) )
        self.assertTrue(hasattr(A , "class_info_file" ) )
        self.assertTrue(hasattr(A , "num_text" ) )
        self.assertTrue(hasattr(A , "repo_path" ) )
        self.assertTrue(hasattr(A , "metadata" ) )
        self.assertTrue(hasattr(A , "do_reduce_labels" ) )

    def snake_case_ ( self : List[Any] ):
        # Intentionally skipped in the upstream suite.
        pass

    def snake_case_ ( self : Union[str, Any] ):
        # Initialize image_processor
        _UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
        for image in image_inputs:
            self.assertIsInstance(A , Image.Image )
        # Test not batched input
        _UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
        _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _UpperCAmelCase , _UpperCAmelCase : Dict = self.image_processing_tester.get_expected_values(A , batched=A )
        _UpperCAmelCase : Optional[Any] = image_processor(
            A , ["semantic"] * len(A ) , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def snake_case_ ( self : str ):
        # Initialize image_processor
        _UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _UpperCAmelCase : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=A , numpify=A )
        for image in image_inputs:
            self.assertIsInstance(A , np.ndarray )
        # Test not batched input
        _UpperCAmelCase : int = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
        _UpperCAmelCase , _UpperCAmelCase : List[Any] = self.image_processing_tester.get_expected_values(A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _UpperCAmelCase , _UpperCAmelCase : int = self.image_processing_tester.get_expected_values(A , batched=A )
        _UpperCAmelCase : List[str] = image_processor(
            A , ["semantic"] * len(A ) , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def snake_case_ ( self : Optional[int] ):
        # Initialize image_processor
        _UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _UpperCAmelCase : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=A , torchify=A )
        for image in image_inputs:
            self.assertIsInstance(A , torch.Tensor )
        # Test not batched input
        _UpperCAmelCase : Tuple = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
        _UpperCAmelCase , _UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(A )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _UpperCAmelCase , _UpperCAmelCase : List[str] = self.image_processing_tester.get_expected_values(A , batched=A )
        _UpperCAmelCase : Optional[int] = image_processor(
            A , ["semantic"] * len(A ) , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def snake_case_ ( self : Optional[int] , A : Tuple=False , A : Optional[Any]=False , A : int="np" ):
        # Build processor inputs, optionally with random segmentation maps
        # (as numpy arrays or PIL images) and an instance-id mapping.
        _UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        _UpperCAmelCase : List[str] = self.image_processing_tester.num_labels
        _UpperCAmelCase : Any = None
        _UpperCAmelCase : List[Any] = None
        _UpperCAmelCase : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
        if with_segmentation_maps:
            _UpperCAmelCase : Union[str, Any] = num_labels
            if is_instance_map:
                _UpperCAmelCase : Optional[int] = list(range(A ) ) * 2
                _UpperCAmelCase : Union[str, Any] = dict(enumerate(A ) )
            _UpperCAmelCase : Tuple = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
            ]
            if segmentation_type == "pil":
                _UpperCAmelCase : Optional[int] = [Image.fromarray(A ) for annotation in annotations]
        _UpperCAmelCase : int = image_processor(
            A , ["semantic"] * len(A ) , A , return_tensors="pt" , instance_id_to_semantic_id=A , pad_and_return_pixel_mask=A , )
        return inputs

    def snake_case_ ( self : Any ):
        # Intentionally skipped in the upstream suite.
        pass

    def snake_case_ ( self : Dict ):
        # Segmentation maps must be padded to the pixel grid and each mask
        # label must pair with a class label of the same leading dimension.
        def common(A : List[Any]=False , A : List[str]=None ):
            _UpperCAmelCase : str = self.comm_get_image_processor_inputs(
                with_segmentation_maps=A , is_instance_map=A , segmentation_type=A )
            _UpperCAmelCase : Optional[int] = inputs["mask_labels"]
            _UpperCAmelCase : str = inputs["class_labels"]
            _UpperCAmelCase : List[str] = inputs["pixel_values"]
            _UpperCAmelCase : Any = inputs["text_inputs"]
            # check the batch_size
            for mask_label, class_label, text_input in zip(A , A , A ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(A ) , self.image_processing_tester.num_text )
        common()
        common(is_instance_map=A )
        common(is_instance_map=A , segmentation_type="pil" )
        common(is_instance_map=A , segmentation_type="pil" )

    def snake_case_ ( self : int ):
        # Run-length encoding of a simple binary mask: a single set pixel at
        # flat offset 20 encodes as [21, 1, ...] style (offset, run) pairs.
        _UpperCAmelCase : Optional[Any] = np.zeros((2_0, 5_0) )
        _UpperCAmelCase : Dict = 1
        _UpperCAmelCase : Optional[int] = 1
        _UpperCAmelCase : Optional[int] = 1
        _UpperCAmelCase : List[str] = binary_mask_to_rle(A )
        self.assertEqual(len(A ) , 4 )
        self.assertEqual(rle[0] , 2_1 )
        self.assertEqual(rle[1] , 4_5 )

    def snake_case_ ( self : Optional[Any] ):
        _UpperCAmelCase : Optional[Any] = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        _UpperCAmelCase : int = self.image_processing_tester.get_fake_oneformer_outputs()
        # NOTE(review): `fature_extractor` is undefined here — this should be
        # the image processor constructed just above.
        _UpperCAmelCase : List[str] = fature_extractor.post_process_semantic_segmentation(A )
        self.assertEqual(len(A ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        _UpperCAmelCase : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        _UpperCAmelCase : Optional[Any] = fature_extractor.post_process_semantic_segmentation(A , target_sizes=A )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )

    def snake_case_ ( self : Dict ):
        _UpperCAmelCase : Tuple = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        _UpperCAmelCase : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs()
        _UpperCAmelCase : Tuple = image_processor.post_process_instance_segmentation(A , threshold=0 )
        self.assertTrue(len(A ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , A )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )

    def snake_case_ ( self : Any ):
        _UpperCAmelCase : Optional[int] = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=7_7 , task_seq_length=7_7 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        _UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
        _UpperCAmelCase : Tuple = image_processor.post_process_panoptic_segmentation(A , threshold=0 )
        self.assertTrue(len(A ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , A )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
# ---- boundary between concatenated source fragments (extraction artifact removed) ----
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCAmelCase__(__snake_case ) -> Optional[Any]:
    """Build a ``FocalNetConfig`` matching the given checkpoint name.

    NOTE(review): mechanical renaming collapsed every local into
    ``lowerCamelCase__`` (each assignment overwrites the previous one) and the
    bodies reference names that no longer exist (``model_name``, ``idalabel``,
    ...), while distinct arguments were all replaced with the single parameter
    ``__snake_case``. The intended per-variant values (depths, focal levels /
    windows, embed dim, label maps) must be given distinct names before the
    final ``FocalNetConfig(...)`` call can receive correct arguments.
    """
    # Depths / conv-embedding / post-layernorm / layerscale per model family.
    lowerCamelCase__ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    lowerCamelCase__ = True if '''large''' in model_name or '''huge''' in model_name else False
    lowerCamelCase__ = True if '''large''' in model_name or '''huge''' in model_name else False
    lowerCamelCase__ = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            lowerCamelCase__ = [3, 3, 3, 3]
            lowerCamelCase__ = [5, 5, 5, 5]
        elif "fl4" in model_name:
            lowerCamelCase__ = [4, 4, 4, 4]
            lowerCamelCase__ = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        lowerCamelCase__ = [3, 3, 3, 3]
        if "lrf" in model_name:
            lowerCamelCase__ = [3, 3, 3, 3]
        else:
            lowerCamelCase__ = [2, 2, 2, 2]
    # Embedding dimension grows with the model size.
    if "tiny" in model_name:
        lowerCamelCase__ = 96
    elif "small" in model_name:
        lowerCamelCase__ = 96
    elif "base" in model_name:
        lowerCamelCase__ = 128
    elif "large" in model_name:
        lowerCamelCase__ = 192
    elif "xlarge" in model_name:
        lowerCamelCase__ = 256
    elif "huge" in model_name:
        lowerCamelCase__ = 352
    # set label information
    lowerCamelCase__ = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        lowerCamelCase__ = '''imagenet-22k-id2label.json'''
    else:
        lowerCamelCase__ = '''imagenet-1k-id2label.json'''
    lowerCamelCase__ = json.load(open(hf_hub_download(__snake_case ,__snake_case ,repo_type='''dataset''' ) ,'''r''' ) )
    lowerCamelCase__ = {int(__snake_case ): v for k, v in idalabel.items()}
    lowerCamelCase__ = {v: k for k, v in idalabel.items()}
    lowerCamelCase__ = FocalNetConfig(
        embed_dim=__snake_case ,depths=__snake_case ,focal_levels=__snake_case ,focal_windows=__snake_case ,use_conv_embed=__snake_case ,idalabel=__snake_case ,labelaid=__snake_case ,use_post_layernorm=__snake_case ,use_layerscale=__snake_case ,)
    return config
def lowerCAmelCase__(__snake_case ) -> str:
    """Map a FocalNet checkpoint parameter name to its Transformers equivalent.

    Bug fix: the original body assigned every intermediate result to a
    throwaway variable while each condition kept testing an undefined name
    (``name``), so no replacement ever took effect and the function raised
    ``NameError``. The replacements are now chained on a local, matching the
    upstream conversion script.

    Args:
        __snake_case: parameter name from the original (timm-style) state dict.

    Returns:
        The renamed key for the Hugging Face ``FocalNet`` state dict; any key
        that is not part of the classification head is prefixed ``focalnet.``.
    """
    name = __snake_case
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    # The focal modulation sub-module renames f/h/proj to explicit projections.
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case=False ) -> Optional[int]:
    """Download a FocalNet checkpoint, convert it to the Transformers format,
    verify the predictions, and optionally save / push the result.

    NOTE(review): mechanical renaming collapsed distinct values (model name,
    dump folder, push flag, renamed keys, normalization stats, reference
    pixel values) into the three ``__snake_case`` parameters and the single
    ``lowerCamelCase__`` local, so most calls below receive the wrong
    arguments (e.g. the ``torch.allclose`` check compares pixel values
    against a function parameter). Restore distinct names before running.
    Performs network and filesystem I/O.
    """
    # Map of supported model names to their original checkpoint URLs.
    lowerCamelCase__ = {
        '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
        '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
        '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
        '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
        '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
        '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
        '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
        '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
        '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
        '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
    }
    # fmt: on
    lowerCamelCase__ = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' ,__snake_case )
    lowerCamelCase__ = torch.hub.load_state_dict_from_url(__snake_case ,map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        lowerCamelCase__ = state_dict.pop(__snake_case )
        lowerCamelCase__ = val
    lowerCamelCase__ = get_focalnet_config(__snake_case )
    lowerCamelCase__ = FocalNetForImageClassification(__snake_case )
    model.eval()
    # load state dict
    model.load_state_dict(__snake_case )
    # verify conversion
    lowerCamelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCamelCase__ = BitImageProcessor(
        do_resize=__snake_case ,size={'''shortest_edge''': 256} ,resample=PILImageResampling.BILINEAR ,do_center_crop=__snake_case ,crop_size=224 ,do_normalize=__snake_case ,image_mean=__snake_case ,image_std=__snake_case ,)
    lowerCamelCase__ = Image.open(requests.get(__snake_case ,stream=__snake_case ).raw )
    lowerCamelCase__ = processor(images=__snake_case ,return_tensors='''pt''' )
    # Reference torchvision pipeline used to cross-check the processor output.
    lowerCamelCase__ = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] ,std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    lowerCamelCase__ = image_transforms(__snake_case ).unsqueeze(0 )
    # verify pixel_values
    # NOTE(review): should compare against the torchvision-produced pixel
    # values, not a function parameter.
    assert torch.allclose(inputs.pixel_values ,__snake_case ,atol=1E-4 )
    lowerCamelCase__ = model(**__snake_case )
    lowerCamelCase__ = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' ,model.config.idalabel[predicted_class_idx] )
    print('''First values of logits:''' ,outputs.logits[0, :3] )
    # Expected first logits per converted model, for a sanity check.
    if model_name == "focalnet-tiny":
        lowerCamelCase__ = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        lowerCamelCase__ = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        lowerCamelCase__ = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        lowerCamelCase__ = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        lowerCamelCase__ = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        lowerCamelCase__ = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] ,__snake_case ,atol=1E-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(__snake_case )
        processor.save_pretrained(__snake_case )
    if push_to_hub:
        print(F'Pushing model and processor of {model_name} to the hub...' )
        model.push_to_hub(F'{model_name}' )
        processor.push_to_hub(F'{model_name}' )
if __name__ == "__main__":
    # Bug fix: the original parsed into `_a` but then used the undefined
    # names `parser`, `args` and `convert_focalnet_checkpoint`. The
    # conversion entry point defined directly above is `lowerCAmelCase__`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )
    args = parser.parse_args()
    lowerCAmelCase__(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
# ---- boundary between concatenated source fragments (extraction artifact removed) ----
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __A ( lowerCAmelCase ):
    """Output container for the temporal transformer model below.

    NOTE(review): the single dataclass field was obfuscated to a bare
    ``= 42``; upstream this is ``sample: torch.FloatTensor``, and the class
    is referenced later as ``TransformerTemporalModelOutput`` — confirm and
    restore both names. The base class ``lowerCAmelCase`` is likewise an
    obfuscated reference (upstream: ``BaseOutput``).
    """

    lowerCAmelCase_ = 42
class __A ( lowerCAmelCase , lowerCAmelCase ):
    """Transformer model applied over the temporal axis of a video latent.

    NOTE(review): obfuscation left both base classes as the same name
    (upstream: ``ModelMixin, ConfigMixin``) and collapsed every ``__init__``
    parameter into ``__lowerCAmelCase`` — duplicate parameter names are a
    SyntaxError, so the original signature (num_attention_heads=16,
    attention_head_dim=88, in_channels, ..., activation_fn="geglu") must be
    restored before this can run.
    """

    @register_to_config
    def __init__( self , __lowerCAmelCase = 1_6 , __lowerCAmelCase = 8_8 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 3_2 , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = "geglu" , __lowerCAmelCase = True , __lowerCAmelCase = True , ):
        """Set up the group norm, input/output projections and the stack of
        ``BasicTransformerBlock`` layers."""
        super().__init__()
        lowerCamelCase__ = num_attention_heads
        lowerCamelCase__ = attention_head_dim
        lowerCamelCase__ = num_attention_heads * attention_head_dim
        lowerCamelCase__ = in_channels
        lowerCamelCase__ = torch.nn.GroupNorm(num_groups=__lowerCAmelCase , num_channels=__lowerCAmelCase , eps=1E-6 , affine=__lowerCAmelCase )
        lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
        # 3. Define transformers blocks
        lowerCamelCase__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dropout=__lowerCAmelCase , cross_attention_dim=__lowerCAmelCase , activation_fn=__lowerCAmelCase , attention_bias=__lowerCAmelCase , double_self_attention=__lowerCAmelCase , norm_elementwise_affine=__lowerCAmelCase , )
                for d in range(__lowerCAmelCase )
            ] )
        lowerCamelCase__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=1 , __lowerCAmelCase=None , __lowerCAmelCase = True , ):
        """Forward pass: fold the frame axis into the batch, attend over
        frames per spatial location, then restore the original layout and add
        the residual.

        NOTE(review): like ``__init__``, the parameter list repeats one name
        and the body references the intended names (``hidden_states``,
        ``num_frames``, ...) — restore before running.
        """
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = hidden_states.shape
        lowerCamelCase__ = batch_frames // num_frames
        lowerCamelCase__ = hidden_states
        lowerCamelCase__ = hidden_states[None, :].reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        lowerCamelCase__ = self.norm(__lowerCAmelCase )
        lowerCamelCase__ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , __lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = self.proj_in(__lowerCAmelCase )
        # 2. Blocks
        for block in self.transformer_blocks:
            lowerCamelCase__ = block(
                __lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , timestep=__lowerCAmelCase , cross_attention_kwargs=__lowerCAmelCase , class_labels=__lowerCAmelCase , )
        # 3. Output
        lowerCamelCase__ = self.proj_out(__lowerCAmelCase )
        lowerCamelCase__ = (
            hidden_states[None, None, :]
            .reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        lowerCamelCase__ = hidden_states.reshape(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        lowerCamelCase__ = hidden_states + residual
        if not return_dict:
            return (output,)
        # NOTE(review): `TransformerTemporalModelOutput` is undefined here —
        # the output dataclass above was renamed to `__A`.
        return TransformerTemporalModelOutput(sample=__lowerCAmelCase )
# ---- boundary between concatenated source fragments (extraction artifact removed) ----
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): obfuscation collapsed two distinct constants into one name,
# so the second assignment shadows the first; the test class below refers to
# them as SPIECE_UNDERLINE and SAMPLE_VOCAB. Annotations fixed to `str`
# (the originals, `Tuple`/`List[str]`, are not imported and would raise
# NameError at import time since module-level annotations are evaluated).
_SCREAMING_SNAKE_CASE : str = "▁"
_SCREAMING_SNAKE_CASE : str = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class _snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertGenerationTokenizer
__snake_case = False
__snake_case = True
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
super().setUp()
__magic_name__ : Union[str, Any] = BertGenerationTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = "<s>"
__magic_name__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) , lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) , lowerCamelCase__ )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Tuple:
__magic_name__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(lowerCamelCase__ ) , 1002 )
def lowerCAmelCase__ ( self: Optional[int] ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def lowerCAmelCase__ ( self: Optional[int] ) -> int:
__magic_name__ : List[str] = BertGenerationTokenizer(lowerCamelCase__ , keep_accents=lowerCamelCase__ )
__magic_name__ : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , [285, 46, 10, 170, 382] , )
__magic_name__ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__magic_name__ : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
__magic_name__ : List[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(
lowerCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[int]:
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]:
__magic_name__ : Any = "Hello World!"
__magic_name__ : str = [1_8536, 2260, 101]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@slow
def lowerCAmelCase__ ( self: Tuple ) -> Any:
__magic_name__ : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
__magic_name__ : Optional[Any] = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(lowerCamelCase__ , self.big_tokenizer.encode(lowerCamelCase__ ) )
@require_torch
@slow
def lowerCAmelCase__ ( self: int ) -> List[str]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__magic_name__ : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
__magic_name__ : Optional[Any] = " ".join(lowerCamelCase__ )
__magic_name__ : str = self.big_tokenizer.encode_plus(lowerCamelCase__ , return_tensors="pt" , return_token_type_ids=lowerCamelCase__ )
__magic_name__ : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowerCamelCase__ )
__magic_name__ : Optional[Any] = BertGenerationConfig()
__magic_name__ : List[Any] = BertGenerationEncoder(lowerCamelCase__ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase__ )
model(**lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = {"input_ids": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , ) | 436 |
def _lowerCamelCase(SCREAMING_SNAKE_CASE=1000):
    """Return the sum of 2*a*floor((a-1)/2) for a in 3..SCREAMING_SNAKE_CASE.

    Bug fix: the generator referenced an undefined free variable ``n``
    instead of the function's own parameter, raising ``NameError`` on any
    call.

    Args:
        SCREAMING_SNAKE_CASE: inclusive upper bound of the summation range.

    Returns:
        The integer sum (0 when the bound is below 3).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, SCREAMING_SNAKE_CASE + 1))


if __name__ == "__main__":
    # Bug fix: the guard called `solution()`, which does not exist in this
    # module — the function above is `_lowerCamelCase`.
    print(_lowerCamelCase())
# ---- boundary between concatenated source fragments (extraction artifact removed) ----
"""simple docstring"""
from maths.prime_check import is_prime
def __A(a_: int) -> int:
    """Return the twin prime of ``a_`` (i.e. ``a_ + 2``) when both are prime.

    Bug fixes: the original checked ``isinstance(__lowercase, __lowercase)``
    (an undefined name), referenced ``number`` instead of the parameter, and
    declared a ``List[str]`` return annotation although it returns an int.

    Args:
        a_: candidate lower twin prime.

    Returns:
        ``a_ + 2`` if ``a_`` and ``a_ + 2`` are both prime, otherwise ``-1``.

    Raises:
        TypeError: if ``a_`` is not an integer.
    """
    if not isinstance(a_, int):
        msg = f"Input value of [number={a_}] must be an integer"
        raise TypeError(msg)
    # `is_prime` comes from the module-level import (maths.prime_check).
    if is_prime(a_) and is_prime(a_ + 2):
        return a_ + 2
    return -1
if __name__ == "__main__":
    # Run the module's doctests when executed directly. (Trailing "| 704 |"
    # extraction garbage removed — it made the line a SyntaxError.)
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
# Type alias for an undirected edge stored as an ordered (low, high) vertex
# pair. NOTE(review): the rest of this fragment refers to the alias as
# ``EdgeT`` — the name here appears to have been mangled; confirm against
# the annotations below.
A = tuple[int, int]
class __lowercase:
    """Undirected weighted graph with a Prim's-algorithm MST.

    ``edges`` maps a canonical (min, max) vertex pair to its weight.

    Bug fixes versus the obfuscated original: ``__init__`` and ``add_edge``
    each repeated one parameter name (a SyntaxError), both methods were named
    ``_lowerCamelCase`` (the second silently shadowed the first), and the
    body referenced the undefined class name ``Graph``. Method names now
    match the call sites (``add_edge`` / ``prims_algorithm``).
    """

    def __init__(self, vertices, edges):
        """Store the vertex set and normalise each edge key to (min, max)."""
        self.vertices: set[int] = vertices
        self.edges: dict[tuple[int, int], int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge, weight):
        """Insert ``edge`` (adding both endpoints) with the given weight."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        """Return a minimum spanning tree of this graph (Prim's algorithm)."""
        # `type(self)` avoids naming the class inside its own body (which
        # would be mangled due to the leading double underscore).
        subgraph = type(self)({min(self.vertices)}, {})
        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly heavier than every real edge.
            min_weight = max(self.edges.values()) + 1
            min_edge = None
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint already inside the tree.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def __A(a_: str = "p107_network.txt") -> int:
    """Project Euler 107: maximum saving achieved by replacing a network with its MST.

    Reads a comma-separated adjacency matrix ('-' marks a missing edge) from
    ``a_``, resolved relative to this file's directory, builds the graph,
    computes its minimum spanning tree with Prim's algorithm and returns the
    total edge weight removed.

    Fixes: all locals were collapsed to ``__a`` so the edge dict was never
    populated, the inner loop reused the outer index, and the file path was
    joined with itself.
    """
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, a_)
    edges = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]
    # Only walk the strict lower triangle: the matrix is symmetric.
    for edgea in range(1, len(adjaceny_matrix)):
        for edgeb in range(edgea):
            if adjaceny_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjaceny_matrix[edgea][edgeb])

    graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


# Expose the conventional name used by the __main__ guard below.
solution = __A
if __name__ == "__main__":
    # Print the repr-style result, e.g. "solution() = 259679". A dataset
    # artifact fused onto this line (a SyntaxError) was removed.
    print(f"{solution() = }")
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
A : List[Any] = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save ``model`` into ``dirpath``, clearing any previous checkpoint files first.

    If the directory exists, stale ``config.json`` / ``pytorch_model.bin`` are
    removed; otherwise the directory is created.

    Fixes: the original definition used the same placeholder name for both
    parameters (a SyntaxError) and a name the call site (``save_model``) never
    resolved.
    """
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute -sum(p * log(p)) over the last dimension of tensor ``p``.

    When ``unlogit`` is true, ``p`` is squared first (turns signed scores into
    non-negative values). Entries where p == 0 contribute 0 instead of NaN.

    Fixes: the original signature repeated one placeholder name for both
    parameters (a SyntaxError), every local was assigned to the same garbled
    name, and the zero-masking statement was lost; the call sites use
    ``entropy``.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) yields NaN; define it as 0, the standard entropy convention.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor as a tab-separated table, one row per layer.

    Float tensors are printed with 5 decimals; long (integer) tensors with %d.

    Fixes: the body referenced ``tensor`` while the parameter had a placeholder
    name (NameError), and the function is renamed ``print_ad_tensor`` to match
    every call site in this file.
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run ``model`` over ``eval_dataloader`` and gather per-head statistics.

    Returns ``(attn_entropy, head_importance, total_loss)``, the first two being
    (num_layers, num_heads) tensors. Importance is the accumulated
    |d(loss)/d(head_mask)|; entropy is computed from the attention maps.

    Fixes: the original signature repeated one placeholder name for every
    parameter (a SyntaxError) and every local was assigned to the same garbled
    name; names are restored from the body's own later uses and the call sites.
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # NOTE(review): the rank-scatter statement was garbled away in the original;
    # restored as ranking heads by descending importance — confirm upstream.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least-important heads until the score drops below
    ``args.masking_threshold * original_score``; returns the final head mask.

    The score metric is 1/loss (the language-modeling loss). The mask tensor
    has shape (num_layers, num_heads); the result is also saved to
    ``head_mask.npy`` in ``args.output_dir``.

    Fixes: the original signature repeated one placeholder parameter name (a
    SyntaxError) and all locals were garbled; restored from the call sites and
    the body's later uses.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically prune the heads masked out in ``head_mask`` and report the
    change in parameter count, score and timing, then save the pruned model.

    Fixes: the original signature repeated one placeholder parameter name (a
    SyntaxError) and all locals were garbled; restored from the call site
    (args, model, eval_dataloader, head_mask) and the body's later uses.
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # Map layer index -> list of head indices whose mask entry is 0.
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses a single pruned head to a bare int; re-wrap it.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """Entry point: parse CLI args, then measure, optionally mask and prune
    GPT-2 attention heads.

    Fixes: renamed to ``main`` (the __main__ guard calls ``main()``); every
    local assignment went to one garbled placeholder so ``args``, ``model``
    and the dataloader were undefined at their uses; argparse ``default``/
    ``type``/``required`` placeholders and the nonexistent ``np.intaa`` dtype
    are restored.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    # NOTE(review): `GPTaLMHeadModel` is the name this file imports from
    # transformers; it presumably garbles GPT2LMHeadModel — confirm upstream.
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    # NOTE(review): original used the nonexistent `np.intaa`; token ids are
    # loaded as int64, the dtype torch expects for embedding indices.
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
    # Script entry point: parse args, compute head importance, optionally mask
    # and prune attention heads. A stray dataset-artifact line was removed.
    main()
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge LoRA weights from a .safetensors checkpoint into a diffusers pipeline.

    Applies ``W += alpha * (up @ down)`` to every targeted layer of the UNet or
    text encoder and returns the updated pipeline.

    Fixes: the original definition named all five parameters with the same
    placeholder (a SyntaxError); names are restored from the call site. The
    nonexistent ``torch.floataa`` dtype is restored as float32 (NOTE(review):
    per the reference conversion script — confirm upstream).
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer: walk the attribute path, re-joining name pieces
        # that were split on '_' until an attribute actually resolves.
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # pair the lora_up / lora_down keys, up first
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    # Fixes: every assignment target was garbled to one placeholder, so
    # `parser`, `args` and the path variables were undefined at their uses;
    # a dataset artifact fused onto the last line was removed.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
# The 64-character Base64 alphabet (RFC 4648). The functions below read it as
# B64_CHARSET; the original bound it to a garbled name they never referenced.
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
# Backward-compatible alias for the constant's previous (garbled) name.
lowerCAmelCase__ = B64_CHARSET


def base64_encode(data: bytes) -> bytes:
    """Encode ``data`` to Base64, returning bytes including any '=' padding.

    Raises:
        TypeError: if ``data`` is not a bytes-like object.

    Fixes: the per-byte conversion called ``bin()`` on the whole bytes object
    instead of each byte, and the function shared one garbled name with the
    decoder below (so it was shadowed and unreachable).
    """
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decode Base64 ``encoded_data`` (str or ASCII bytes) back to raw bytes.

    Raises:
        TypeError: if the input is neither str nor bytes.
        ValueError: if a bytes input contains non-ASCII characters.
        AssertionError: on characters outside the alphabet or bad padding.

    Fixes: the locals were collapsed to garbled placeholders (the final
    ``bytes()`` call received the wrong name), and the function shared one
    garbled name with the encoder above.
    """
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)


# Preserve the module's previous final binding of the shared garbled name.
lowerCAmelCase__ = base64_decode
if __name__ == "__main__":
    # Run the module's doctests when executed directly. A stray
    # dataset-artifact line after this guard was removed.
    import doctest

    doctest.testmod()
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we fine-tune from.

    Fixes: every field was declared under one placeholder name without a type
    annotation (so the dataclass exposed no usable fields) and defaults were
    the undefined name ``snake_case``. Field names are restored from their
    uses in ``main`` (``model_args.*``); the class is renamed to match
    ``HfArgumentParser((ModelArguments, ...))``.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we feed the model for training/eval.

    Fixes: fields were declared under one placeholder name without annotations
    and with the undefined default ``snake_case``; the extension-validation
    method had a garbled name where ``__post_init__`` is required for it to run.
    Field names are restored from their uses in ``main`` (``data_args.*``).
    """

    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        """Validate that supplied data files are csv or json."""
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """Data collator that dynamically pads multiple-choice inputs.

    Each feature holds one list per choice under "input_ids" (etc.) plus a
    "label"/"labels" entry. The choice dimension is flattened, padded with the
    tokenizer, then restored to shape (batch_size, num_choices, seq_len).

    Fixes: the field declarations were garbled (the tokenizer field was
    literally ``42`` under a placeholder name, with no annotations, so the
    dataclass accepted no constructor arguments), and ``torch.intaa`` does not
    exist (labels are int64). Annotations are quoted so the class also imports
    without transformers available at runtime.
    """

    tokenizer: "PreTrainedTokenizerBase"
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: "Optional[int]" = None
    pad_to_multiple_of: "Optional[int]" = None

    def __call__(self, features):
        """Collate a list of multiple-choice features into a padded batch dict."""
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # One sub-dict per choice, then flatten to a single list for padding.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )

        # Un-flatten back to (batch, num_choices, seq_len)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def lowerCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE_ )
datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
A__ = {}
if data_args.train_file is not None:
A__ = data_args.train_file
if data_args.validation_file is not None:
A__ = data_args.validation_file
A__ = data_args.train_file.split("." )[-1]
A__ = load_dataset(
SCREAMING_SNAKE_CASE_ , data_files=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
A__ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
A__ = [F'ending{i}' for i in range(4 )]
A__ = "sent1"
A__ = "sent2"
if data_args.max_seq_length is None:
A__ = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
A__ = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
A__ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(SCREAMING_SNAKE_CASE_: Optional[Any] ):
A__ = [[context] * 4 for context in examples[context_name]]
A__ = examples[question_header_name]
A__ = [
[F'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(SCREAMING_SNAKE_CASE_ )
]
# Flatten out
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
A__ = list(chain(*SCREAMING_SNAKE_CASE_ ) )
# Tokenize
A__ = tokenizer(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
A__ = raw_datasets["train"]
if data_args.max_train_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
A__ = train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
A__ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
A__ = min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
A__ = eval_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
A__ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(SCREAMING_SNAKE_CASE_: str ):
A__ , A__ = eval_predictions
A__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
A__ = Trainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model() # Saves the tokenizer too for easy upload
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("train" , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ = trainer.evaluate()
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE_ )
A__ = min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE_ )
A__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE_ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] ) -> Dict:
    """Spawned-process entry point (presumably for a TPU/`xla_spawn` launcher —
    TODO confirm against the launcher that imports this): the worker-index
    argument is ignored and training is delegated to ``main()``."""
    main()


if __name__ == "__main__":
    main()
# ---- (corrupt dataset-separator line removed; was not valid Python) ----
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
class UpperCAmelCase ( UpperCAmelCase__ ):
    """Container that runs several ControlNet models over the same sample and
    merges their per-block residuals (a MultiControlNet-style wrapper).

    NOTE(review): this block looks mechanically renamed and is not runnable as
    written — all three methods below share the literal name ``snake_case__``
    (later defs shadow earlier ones), several parameters repeat the name
    ``__lowercase`` (a SyntaxError), and locals are assigned to ``snake_case_``
    while different names (``self.nets``, ``idx``, ``down_block_res_samples``,
    ``mid_block_res_sample``, ``controlnets`` …) are read back.  Confirm
    against the upstream implementation before relying on it.
    """

    def __init__( self : Optional[Any] , __lowercase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        """Wrap the given controlnets in an ``nn.ModuleList``."""
        super().__init__()
        # NOTE(review): presumably meant to be ``self.nets = nn.ModuleList(...)``
        # — ``self.nets`` is read by every method below.
        snake_case_ = nn.ModuleList(__lowercase )

    def snake_case__ ( self : Dict , __lowercase : torch.FloatTensor , __lowercase : Union[torch.Tensor, float, int] , __lowercase : torch.Tensor , __lowercase : List[torch.tensor] , __lowercase : List[float] , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[torch.Tensor] = None , __lowercase : Optional[Dict[str, Any]] = None , __lowercase : bool = False , __lowercase : bool = True , ):
        """Forward pass: run each controlnet on its (image, scale) pair and
        accumulate the residuals across controlnets (element-wise sums)."""
        for i, (image, scale, controlnet) in enumerate(zip(__lowercase , __lowercase , self.nets ) ):
            snake_case_ , snake_case_ = controlnet(
                __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
            # merge samples
            if i == 0:
                # the first controlnet initialises the accumulators
                snake_case_ , snake_case_ = down_samples, mid_sample
            else:
                # subsequent controlnets are summed into the accumulators
                snake_case_ = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(__lowercase , __lowercase )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def snake_case__ ( self : Optional[int] , __lowercase : Union[str, os.PathLike] , __lowercase : bool = True , __lowercase : Callable = None , __lowercase : bool = False , __lowercase : Optional[str] = None , ):
        """Save each wrapped controlnet under ``save_directory``, then
        ``save_directory_1``, ``_2``, … (one folder per net)."""
        snake_case_ = 0
        snake_case_ = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                __lowercase , is_main_process=__lowercase , save_function=__lowercase , safe_serialization=__lowercase , variant=__lowercase , )
            idx += 1
            # next net is saved under "<base>_<idx>"
            snake_case_ = model_path_to_save + f"_{idx}"

    @classmethod
    def snake_case__ ( cls : Optional[int] , __lowercase : Optional[Union[str, os.PathLike]] , **__lowercase : List[Any] ):
        """Load controlnets from ``<path>``, ``<path>_1``, ``<path>_2``, … until
        a directory is missing, then build the multi-controlnet wrapper."""
        snake_case_ = 0
        snake_case_ = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        snake_case_ = pretrained_model_path
        while os.path.isdir(__lowercase ):
            snake_case_ = ControlNetModel.from_pretrained(__lowercase , **__lowercase )
            controlnets.append(__lowercase )
            idx += 1
            snake_case_ = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(__lowercase )} controlnets loaded from {pretrained_model_path}." )
        if len(__lowercase ) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(__lowercase )}. Expected at least {pretrained_model_path + '_0'}." )
        return cls(__lowercase )
# ---- (corrupt dataset-separator line removed; was not valid Python) ----
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue_model_parallelism.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
    ] )
class UpperCAmelCase ( unittest.TestCase ):
    """SageMaker release test: fine-tune RoBERTa-large with SageMaker model
    parallelism (smdistributed + MPI) and assert runtime/metric budgets.

    NOTE(review): all four methods below share the literal name
    ``snake_case__`` — later definitions shadow earlier ones, so only the last
    (the parameterized training test) survives on the class.  They look like
    ``setUp``/``create_estimator``/``save_results_as_csv``/``test_*`` from the
    upstream test; several locals are assigned to ``snake_case_`` while other
    names (``smp_options``, ``estimator`` …) are read back.  Restore the real
    names before running.
    """

    def snake_case__ ( self : int ):
        """Copy the example GLUE training script into the test dir (pytorch only)."""
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowercase , )
        # NOTE(review): ``check=__lowercase`` references an undefined name.
        assert hasattr(self , "env" )

    def snake_case__ ( self : Any , __lowercase : Dict ):
        """Build a HuggingFace estimator configured for smdistributed model
        parallelism (4 partitions, interleaved pipeline, DDP) over MPI."""
        # MPI: 8 processes per host
        snake_case_ = {
            "enabled": True,
            "processes_per_host": 8,
        }
        # model-parallel runtime options
        snake_case_ = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        snake_case_ = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        snake_case_ = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" , instance_count=__lowercase , instance_type=self.instance_type , debugger_hook_config=__lowercase , hyperparameters={
            **self.env.hyperparameters,
            "model_name_or_path": self.model_name_or_path,
            "max_steps": 5_00,
        } , metric_definitions=self.env.metric_definitions , distribution=__lowercase , py_version="py36" , )

    def snake_case__ ( self : Tuple , __lowercase : Dict ):
        """Export the job's CloudWatch metrics to ``<test_path>/<job>_metrics.csv``."""
        TrainingJobAnalytics(__lowercase ).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv" )

    @parameterized.expand([(1,)] )
    def snake_case__ ( self : Any , __lowercase : int ):
        """Run one training job and assert runtime and eval metrics stay within
        the parameterized budgets; dump the results as JSON for the PR."""
        snake_case_ = self.create_estimator(__lowercase )
        # run training
        estimator.fit()
        # result dataframe
        snake_case_ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        snake_case_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        snake_case_ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        snake_case_ = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_99_99 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json" , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowercase )
# ---- (corrupt dataset-separator line removed; was not valid Python) ----
def _lowerCAmelCase(number: float, digit_amount: int) -> float:
    """Isolate the fractional (decimal) part of ``number``.

    The original signature repeated the parameter name ``__magic_name__`` for
    both arguments, which is a SyntaxError in Python; the two roles are now
    distinct.

    :param number: value whose fractional part is wanted (sign is preserved).
    :param digit_amount: if > 0, round the fractional part to that many
        decimal places; otherwise return the raw float difference.
    :return: ``number - int(number)``, optionally rounded.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demo of the decimal isolator on representative values.  The original
    # called ``decimal_isolate``, which is undefined in this module — the
    # function is bound to ``_lowerCAmelCase``.
    print(_lowerCAmelCase(1.53, 0))
    print(_lowerCAmelCase(35.345, 1))
    print(_lowerCAmelCase(35.345, 2))
    print(_lowerCAmelCase(35.345, 3))
    print(_lowerCAmelCase(-14.789, 3))
    print(_lowerCAmelCase(0, 2))
    print(_lowerCAmelCase(-14.123, 1))
    print(_lowerCAmelCase(-14.123, 2))
    print(_lowerCAmelCase(-14.123, 3))
# ---- (corrupt dataset-separator line removed; was not valid Python) ----
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _lowerCAmelCase(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    """Consolidate a question-encoder checkpoint and a generator checkpoint
    into a single RAG checkpoint under ``dest_dir``.

    The original signature repeated the parameter name ``__magic_name__`` seven
    times (a SyntaxError); the names are restored from the argparse options
    below that feed this function.

    :param model_type: "rag_token" or "rag_sequence" — selects the model class
        and, if no explicit config is given, the matching base config.
    :param generator_name_or_path: generator model identifier.
    :param question_encoder_name_or_path: question-encoder model identifier.
    :param dest_dir: output directory for the consolidated checkpoint.
    :param config_name_or_path: optional RAG config identifier.
    :param generator_tokenizer_name_or_path: defaults to the generator model.
    :param question_encoder_tokenizer_name_or_path: defaults to the
        question-encoder model.
    """
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check: the consolidated checkpoint must load back.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    # CLI wrapper for the consolidation function.  The original clobbered the
    # parser/args/dest_dir bindings (all assigned to ``_lowerCamelCase``) and
    # then read back names that were never defined, and the final call targeted
    # the undefined name ``consolidate``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type',
        choices=['rag_sequence', 'rag_token'],
        required=True,
        type=str,
        help='RAG model type: rag_sequence, rag_token',
    )
    parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
    parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
    parser.add_argument(
        '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
    )
    parser.add_argument(
        '--generator_tokenizer_name_or_path',
        type=str,
        help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
    )
    parser.add_argument(
        '--question_encoder_tokenizer_name_or_path',
        type=str,
        help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
    )
    parser.add_argument(
        '--config_name_or_path',
        type=str,
        help=(
            'Identifier of the model config to use, if not provided, resolves to a base config for a given'
            ' ``model_type``'
        ),
    )
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    _lowerCAmelCase(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
# ---- (corrupt dataset-separator line removed; was not valid Python) ----
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __snake_case(src_path, map_location: str = "cpu", save_path=None) -> None:
    """Convert every tensor in a saved PyTorch state dict to fp16 and save it.

    Fixes from review: the original repeated the parameter name ``_lowercase``
    for all three arguments, and the loop rebound the state-dict variable to
    ``v.half()`` instead of writing the halved tensor back under its key.

    :param src_path: path to a saved state dict (e.g. pytorch_model.bin).
    :param map_location: device for ``torch.load``.
    :param save_path: output path; defaults to overwriting ``src_path``.
    :raises TypeError: if any value in the loaded dict is not a tensor.
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin')
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
    # Expose the converter as a CLI.  The original referenced the undefined
    # name ``convert``; the converter in this module is ``__snake_case``.
    fire.Fire(__snake_case)
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
lowerCAmelCase: List[str] =parse(importlib.metadata.version("torch"))
def __snake_case(library_or_version, operation: str, requirement_version: str) -> bool:
    """Compare an installed library's version (or an already-parsed ``Version``)
    against ``requirement_version`` using ``operation``.

    The original signature repeated the parameter name ``__A`` three times
    (a SyntaxError) and annotated the boolean result as ``str``.

    :param library_or_version: library name to look up via
        ``importlib.metadata.version``, or a pre-parsed ``Version``.
    :param operation: one of the keys of ``STR_OPERATION_TO_FUNC`` (">", ">=", …).
    :param requirement_version: version string to compare against.
    :raises ValueError: if ``operation`` is not a supported comparator.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}")
    op_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return op_func(library_or_version, parse(requirement_version))
def __snake_case(operation: str, version: str) -> bool:
    """Compare the installed torch version (module-level ``lowerCAmelCase``,
    parsed at import time) against ``version`` using ``operation``.

    Fixes from review: the original repeated the parameter name ``__A`` (a
    SyntaxError) and annotated the result as ``Optional[int]`` although
    ``Optional`` is not imported.  Because this ``def`` rebinds the same
    module-level name as the general comparator above, the comparison is
    inlined here instead of delegating to it.
    """
    return STR_OPERATION_TO_FUNC[operation](lowerCAmelCase, parse(version))
# ---- (corrupt dataset-separator line removed; was not valid Python) ----
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ] )
class A__ ( unittest.TestCase ):
    """SageMaker release test: single-node DistilBERT fine-tuning (pytorch and
    tensorflow variants) with runtime/metric budget assertions.

    NOTE(review): locals in the last test are all assigned to ``__lowercase``
    while other names (``estimator``, ``result_metrics_df`` …) are read back —
    this block appears mechanically renamed; restore the real names to run.
    """

    def a__ ( self : str ) -> Any:
        """Copy the example GLUE training script into the test dir (pytorch only)."""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='utf-8' , check=_UpperCAmelCase , )
        # NOTE(review): ``check=_UpperCAmelCase`` references an undefined name.
        assert hasattr(self , 'env' )

    def a__ ( self : Optional[int] , _UpperCAmelCase : List[str]=1 ) -> str:
        """Build a single-instance HuggingFace estimator for this parameterization."""
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=_UpperCAmelCase , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , )

    def a__ ( self : Optional[int] , _UpperCAmelCase : int ) -> Optional[Any]:
        """Export the job's CloudWatch metrics to ``<test_path>/<job>_metrics.csv``."""
        TrainingJobAnalytics(_UpperCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    def a__ ( self : List[Any] ) -> Tuple:
        """Run one training job and assert runtime and eval metrics stay within
        the parameterized budgets; dump the results as JSON for the PR."""
        __lowercase = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        __lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        __lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        __lowercase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __lowercase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , _UpperCAmelCase )
# ---- (corrupt dataset-separator line removed; was not valid Python) ----
def __SCREAMING_SNAKE_CASE(input_a: int, input_b: int) -> int:
    """Logical OR gate: return 1 if at least one input is 1, else 0.

    Fixes from review: the original repeated the parameter name (a
    SyntaxError), so the second operand was never read; both inputs are used
    now.
    """
    return int((input_a, input_b).count(1) != 0)


# Readable module-level alias.  The self-test and the demo below referred to
# ``or_gate``, which was otherwise undefined in this module.
or_gate = __SCREAMING_SNAKE_CASE


def _test_or_gate() -> None:
    """Exhaustive truth-table check (the original self-test reused — and thus
    shadowed — the gate's own name)."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
# ---- (corrupt dataset-separator line removed; was not valid Python) ----
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class _snake_case ( unittest.TestCase ):
    """Tests for ``FlaxAutoModel``: config/model round-trips for BERT and
    RoBERTa, jit-ability of the loaded models, and error messages for bad
    identifiers/revisions/missing weights.

    NOTE(review): every method is bound to the same literal name
    ``SCREAMING_SNAKE_CASE__`` — later definitions shadow earlier ones, and
    unittest only discovers ``test_``-prefixed methods, so none of these run
    as written.  Most assertion arguments are the clobbered placeholder ``a``;
    restore the real names before relying on this class.
    """

    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        # BERT: config and model should load and have the expected types.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(a):
                SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(a)
                self.assertIsNotNone(a)
                self.assertIsInstance(a , a)
                SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(a)
                self.assertIsNotNone(a)
                self.assertIsInstance(a , a)

    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Any:
        # RoBERTa: same round-trip as above.
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(a):
                SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(a)
                self.assertIsNotNone(a)
                self.assertIsInstance(a , a)
                SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(a)
                self.assertIsNotNone(a)
                self.assertIsInstance(a , a)

    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        # BERT models must be jax.jit-compatible.
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a)
            SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(a)
            SCREAMING_SNAKE_CASE = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**a):
                return model(**a)

            eval(**a).block_until_ready()

    @slow
    def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
        # RoBERTa models must be jax.jit-compatible.
        for model_name in ["roberta-base", "roberta-large"]:
            SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a)
            SCREAMING_SNAKE_CASE = FlaxRobertaModel.from_pretrained(a)
            SCREAMING_SNAKE_CASE = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**a):
                return model(**a)

            eval(**a).block_until_ready()

    def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
        # An invalid model identifier should raise with a clear message.
        with self.assertRaisesRegex(
            a , 'bert-base is not a local folder and is not a valid model identifier'):
            SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('bert-base')

    def SCREAMING_SNAKE_CASE__ ( self) -> int:
        # An invalid revision should raise with a clear message.
        with self.assertRaisesRegex(
            a , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(a , revision='aaaaaa')

    def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
        # A repo without flax weights should report the missing msgpack file.
        with self.assertRaisesRegex(
            a , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
            SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')

    def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
        # PyTorch-only weights require an explicit from_pt=True.
        with self.assertRaisesRegex(a , 'Use `from_pt=True` to load this model'):
            SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
# ---- (corrupt dataset-separator line removed; was not valid Python) ----
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__lowercase : int = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Apply one version comparison and raise if it fails.

    Fixes from review: the original repeated the parameter name
    ``_lowerCamelCase`` six times (a SyntaxError), and it was bound to the
    same module-level name as the functions below it — it is bound here to
    ``_compare_versions``, which is the name the requirement checker actually
    calls.  The comparator table is the module-level dict bound to
    ``__lowercase`` (referenced as the undefined name ``ops`` in the
    original body).

    :raises ValueError: if either version to compare is ``None``.
    :raises ImportError: if the installed version violates the requirement.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            f""" reinstalling {pkg}.""" )
    if not __lowercase[op](version.parse(got_ver) , version.parse(want_ver) ):
        raise ImportError(
            f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of a dependency version, pip-requirement style
    (e.g. ``"tokenizers==0.9.4"``, ``"numpy>=1.17,<2"``, or bare ``"tqdm"``).

    Fixes from review: the original duplicated the parameter name (a
    SyntaxError), assigned every local to ``lowerCamelCase_`` while reading
    back different names, and rebound the same module-level name as its
    siblings — it is bound here to ``require_version``, the name the
    convenience wrapper below calls.

    :param requirement: pip-style requirement string.
    :param hint: optional suffix appended to error messages.
    :raises ValueError: on a malformed requirement or unknown comparator.
    :raises importlib.metadata.PackageNotFoundError: if the package is absent.
    :raises ImportError: if the installed version violates the requirement.
    """
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check: a bare package name just has to be installed
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}" )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}" )
            op, want_ver = match[0]
            wanted[op] = want_ver
            # `__lowercase` is the module-level operator table (str -> callable)
            if op not in __lowercase:
                raise ValueError(f"{requirement}: need one of {list(__lowercase.keys() )}, but got {op}" )

    # special case: the interpreter itself
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}" )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def lowerCamelCase_(requirement: str):
    """Check ``requirement`` with a transformers-specific reinstall hint.

    Fixes from review: the original passed the same variable for both the
    requirement and the hint, and a corrupt ``| 142 | 0 |`` fragment was fused
    onto the return statement.
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
A : List[Any] = "sshleifer/bart-tiny-random"
A : Union[str, Any] = "patrickvonplaten/t5-tiny-random"
@require_torch
class lowerCamelCase (unittest.TestCase ):
    """Tests for ``create_student_by_copying_alternating_layers``: layer counts
    of students distilled from tiny BART/T5 checkpoints.

    NOTE(review): all five methods share the literal name ``__A`` — later
    definitions shadow earlier ones, and unittest only discovers ``test_``-
    prefixed methods, so none run as written.  Several arguments are the
    clobbered placeholder ``__magic_name__``; the first method looks like the
    upstream ``teacher_config`` cached property.  Restore the real names
    before relying on this class.
    """

    @cached_property
    def __A ( self : List[str] ) -> List[str]:
        # presumably the teacher config, loaded from one of the tiny
        # checkpoints above — TODO confirm which identifier belongs here
        return AutoConfig.from_pretrained(__magic_name__ )

    def __A ( self : Optional[int] ) -> str:
        # a 1-encoder/1-decoder student must report one hidden layer
        SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(__magic_name__ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def __A ( self : List[Any] ) -> str:
        # smoke test: student creation with an unspecified decoder depth
        SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(__magic_name__ , tempfile.mkdtemp() , e=1 , d=__magic_name__ )

    def __A ( self : Optional[int] ) -> Dict:
        # with no decoder depth given, the student keeps the teacher's depth
        SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(__magic_name__ , tempfile.mkdtemp() , e=1 , d=__magic_name__ )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def __A ( self : int ) -> Tuple:
        # explicit e=1, d=1 must yield a 1/1 student
        SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ = create_student_by_copying_alternating_layers(__magic_name__ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def __A ( self : Union[str, Any] ) -> Optional[int]:
        # omitting both depths is an error
        with self.assertRaises(__magic_name__ ):
            create_student_by_copying_alternating_layers(__magic_name__ , tempfile.mkdtemp() , e=__magic_name__ , d=__magic_name__ )
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
A : Optional[Any] = get_logger()
A : Optional[dict] = None
class lowerCamelCase (TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
"""simple docstring"""
def __init__( self : int , __magic_name__ : int=None , __magic_name__ : Union[str, Any]=None , **__magic_name__ : str ) -> Tuple:
super().__init__(features=__magic_name__ )
import jax
from jaxlib.xla_client import Device
if isinstance(__magic_name__ , __magic_name__ ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(__magic_name__ )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
SCREAMING_SNAKE_CASE_ = device if isinstance(__magic_name__ , __magic_name__ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
SCREAMING_SNAKE_CASE_ = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE_ = jnp_array_kwargs
@staticmethod
def __A ( ) -> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(__magic_name__ ): device for device in jax.devices()}
def __A ( self : Optional[int] , __magic_name__ : Union[str, Any] ) -> List[str]:
import jax
import jax.numpy as jnp
if isinstance(__magic_name__ , __magic_name__ ) and column:
if all(
isinstance(__magic_name__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__magic_name__ , axis=0 )
return column
def __A ( self : Tuple , __magic_name__ : int ) -> Optional[Any]:
import jax
import jax.numpy as jnp
if isinstance(__magic_name__ , (str, bytes, type(__magic_name__ )) ):
return value
elif isinstance(__magic_name__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE_ = {}
if isinstance(__magic_name__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
SCREAMING_SNAKE_CASE_ = {"dtype": jnp.intaa}
else:
SCREAMING_SNAKE_CASE_ = {"dtype": jnp.intaa}
elif isinstance(__magic_name__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
SCREAMING_SNAKE_CASE_ = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__magic_name__ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_ = np.asarray(__magic_name__ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__magic_name__ , **{**default_dtype, **self.jnp_array_kwargs} )
def _recursive_tensorize(self, data_struct):
    """Recursively convert a (possibly nested) structure to jax arrays.

    Torch tensors are detached to numpy first; object-dtype numpy arrays and
    list/tuple containers are tensorized element-wise and then consolidated.
    """
    import jax

    # support for torch, tf, jax etc.
    if config.TORCH_AVAILABLE and "torch" in sys.modules:
        import torch

        if isinstance(data_struct, torch.Tensor):
            return self._tensorize(data_struct.detach().cpu().numpy()[()])
    if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
        data_struct = data_struct.__array__()
    # support for nested types like struct of list of struct
    if isinstance(data_struct, np.ndarray):
        if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
    elif isinstance(data_struct, (list, tuple)):
        return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
    return self._tensorize(data_struct)
def recursive_tensorize(self, data_struct: dict):
    """Tensorize every leaf of ``data_struct``, keeping lists intact for _consolidate."""
    return map_nested(self._recursive_tensorize, data_struct, map_list=False)
def format_row(self, pa_table: pa.Table) -> Mapping:
    """Extract the first row of ``pa_table``, decode its features, and tensorize it."""
    row = self.numpy_arrow_extractor().extract_row(pa_table)
    row = self.python_features_decoder.decode_row(row)
    return self.recursive_tensorize(row)
def format_column(self, pa_table: pa.Table) -> "jax.Array":
    """Extract the (single) column of ``pa_table``, decode, tensorize, and stack it."""
    column = self.numpy_arrow_extractor().extract_column(pa_table)
    column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
    column = self.recursive_tensorize(column)
    column = self._consolidate(column)
    return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
    """Extract a whole batch, decode it, tensorize every column, and stack each one."""
    batch = self.numpy_arrow_extractor().extract_batch(pa_table)
    batch = self.python_features_decoder.decode_batch(batch)
    batch = self.recursive_tensorize(batch)
    for column_name in batch:
        batch[column_name] = self._consolidate(batch[column_name])
    return batch
| 356 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__A = TypeVar("""KT""")
__A = TypeVar("""VT""")
class Node(Generic[KT, VT]):
    """One skip-list node: a key, a value, and one forward pointer per level."""

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # Nodes the search can jump to from this node, index = level.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node participates in."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    """Probabilistic ordered key/value map (Pugh's skip list).

    A node's height is drawn from a geometric distribution with parameter ``p``
    and capped at ``max_level``, giving expected O(log n) search/insert/delete.
    """

    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()  # sentinel; never holds data
        self.level = 0  # highest level currently in use
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        """Render an ASCII diagram of the list, one row per node."""
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))

            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order by walking level 0."""
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a 1-based node height: flip a p-biased coin, capped at max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1

        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return (node holding ``key`` or None, update vector).

        ``update_vector[i]`` is the rightmost node on level i with key < ``key``
        — exactly the nodes whose pointers insert/delete must rewire.
        """
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        """Remove ``key`` if present, unlinking the node on every level."""
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """Insert ``key`` -> ``value``; an existing key just has its value replaced."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        """Return the value stored under ``key``, or None when absent."""
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
def test_insert():
    """Inserted key/value pairs are all reachable by walking level 0."""
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    """Re-inserting an existing key replaces its value instead of duplicating it."""
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    """find() on an empty list yields None rather than raising."""
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def test_search():
    """find() returns the latest value per key and None for missing keys."""
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    """delete() on an empty list is a silent no-op."""
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    """Keys removed with delete() are no longer returned by find()."""
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    """Deleting one key never disturbs the values stored under other keys."""
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    """After a delete, no forward pointer anywhere still reaches the removed node."""
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    # head + 3 surviving keys, whatever path we take through the levels
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    """Iteration yields keys in ascending order through inserts and deletes."""

    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    """Run the whole suite many times (skip lists are probabilistic)."""
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()
def main():
    """Small demo: build a list, override/delete a key, and print the diagram.

    >>> pytests()
    """

    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
    # Run module doctests first, then the interactive demo.
    import doctest

    doctest.testmod()
    main()
| 93 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Helper from diffusers.utils.testing_utils (imported above): makes test runs
# deterministic so the hard-coded expected slices below are reproducible.
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the Stable Diffusion 2 inpainting pipeline with tiny dummy models."""

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build the smallest possible unet/vae/text-encoder set for fast tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        """Two-step generation on CPU must reproduce the recorded output slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the real stable-diffusion-2-inpainting weights."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        """Full-precision run must match the recorded reference image."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        """fp16 run stays within a loose tolerance of the fp16 reference image."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        """Sequential CPU offload keeps peak VRAM under the budget."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 695 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (transformers' logging wrapper, imported above).
logger = logging.get_logger(__name__)

# Map from model id to its hosted config, used for `from_pretrained` lookups.
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    """Configuration for a GLPN (Global-Local Path Network) depth-estimation model.

    Stores the encoder stage layout (depths, strides, hidden sizes, attention
    heads per stage) plus decoder and regularization hyper-parameters. Extra
    keyword arguments are forwarded to ``PretrainedConfig``.
    """

    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 109 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger (transformers' logging wrapper, imported above);
# used by save_vocabulary below.
logger = logging.get_logger(__name__)

# File names the tokenizer reads/writes; referenced by the tokenizer class below.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

# Hosted vocab/emoji files per pretrained checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

# Maximum sequence length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into dictionaries.

    Each vocab line is either a single token or a comma-separated group of
    surface forms that share one id. Returns
    (surface->id, raw-line->id, id->surface list, emoji dict).
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    # A bare "," line is the comma token itself; other lines may group variants.
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX Japanese, delegating to SubWordJapaneseTokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # One id per raw vocab line (grouped surface forms share an id).
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab; unknown tokens map to unk."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after each turn and
        truncating (from the left) to the model's maximum length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocab and emoji files into ``save_directory`` and return their paths."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """Greedy longest-match Japanese sub-word tokenizer with text cleaning.

    Unknown single characters fall back to ``<|byteN|>`` byte tokens; URLs,
    emails, phone numbers, dates and prices can optionally be replaced by
    placeholder tokens before tokenization.
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        # Longest surface form in the vocab bounds the match window.
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """Replace URLs/emails/phone numbers/dates/prices with placeholder tokens
        and collapse runs of box-drawing characters into a single <BLOCK>."""
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        """Tokenize ``text`` greedily; among overlapping matches the smallest
        token id wins, and unmatched characters become byte tokens."""
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # True for one-char symbols whose UTF-8 encoding is 2 bytes in
            # the listed ranges (punctuation/symbol areas).
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False

        def checku2e(x):
            # True for one-char symbols in U+2000..U+2BFF (3-byte UTF-8).
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # Special tokens start with "<"; allow the full vocab window for them.
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        """Map one id back to text, decoding byte tokens and expanding the
        <SP>/<BR>/<TAB>/<BLOCK>/<KIGOU>/<U2000U2BFF>/emoji placeholders."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| 109 | 1 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = 50 # max width of layer names
UpperCamelCase = 70 # max width of quantizer names
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
_lowercase : List[str] = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=SCREAMING_SNAKE_CASE , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=SCREAMING_SNAKE_CASE , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=SCREAMING_SNAKE_CASE , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=SCREAMING_SNAKE_CASE , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=SCREAMING_SNAKE_CASE , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=SCREAMING_SNAKE_CASE , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Tuple:
    """Install default input/weight QuantDescriptors on QuantLinear from args.

    SCREAMING_SNAKE_CASE: the parsed argparse namespace (``args``).

    Raises:
        ValueError: for an unknown ``--calibrator`` or a missing ``--percentile``.
    """
    # Bug fix: the body referenced an undefined name ``args``.
    args = SCREAMING_SNAKE_CASE
    if args.calibrator == "max":
        calib_method = 'max'
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator' )
        calib_method = 'histogram'
    elif args.calibrator == "mse":
        calib_method = 'histogram'
    else:
        raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
    # Bug fix: ``calib_method`` (not the args namespace) must be passed through.
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def __magic_name__ ( model , args , calib=False , eval=False ) -> Union[str, Any]:  # noqa: A002 - upstream keyword name
    """Configure ``model``'s quantizers according to the CLI ``args``.

    Bug fix: the original definition repeated one parameter name four times
    (a SyntaxError) and passed the flag value through instead of True/False.
    """
    logger.info('Configuring Model for Quantization' )
    logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['embeddings'] , which='weight' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=False )
    if args.recalibrate_weights:
        recalibrate_weights(model )
    if args.fuse_qkv:
        fuse_qkv(model , args )
    if args.clip_gelu:
        clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
    """Put every ``*_quantizer`` module of the model into calibration mode.

    SCREAMING_SNAKE_CASE: the model whose quantizers are toggled in place.
    """
    # Bug fix: the body referenced an undefined name ``model``.
    model = SCREAMING_SNAKE_CASE
    logger.info('Enabling Calibration' )
    for name, module in model.named_modules():
        if name.endswith('_quantizer' ):
            if module._calibrator is not None:
                # Collect statistics instead of quantizing during calibration.
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(F"""{name:80}: {module}""" )
def __magic_name__ ( model , args ) -> List[Any]:
    """Load calibrated amax values and switch quantizers back to quant mode.

    Bug fix: the original definition repeated one parameter name twice
    (a SyntaxError); restore distinct ``model``/``args`` parameters.
    """
    logger.info('Loading calibrated amax' )
    for name, module in model.named_modules():
        if name.endswith('_quantizer' ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    # Histogram-based calibrators need the percentile argument.
                    module.load_calib_amax('percentile' , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def __magic_name__ ( model , args ) -> str:
    """Share a single amax across each attention block's q/k/v quantizers.

    Bug fix: both the outer and inner definitions repeated one parameter name
    (a SyntaxError) and the inner body referenced undefined ``qq``/``qk``/``qv``.
    """

    def fusea(qq , qk , qv ):
        # All three quantizers must already carry a calibrated amax buffer.
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '_amax' ):
                print('          WARNING: NO AMAX BUFFER' )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(F"""          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )

    for name, mod in model.named_modules():
        if name.endswith('.attention.self' ):
            logger.info(F"""FUSE_QKV: {name}""" )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def __magic_name__ ( model , maxval ) -> Dict:
    """Clamp input-quantizer amax of GELU-following ``output.dense`` layers.

    Bug fix: the original definition repeated one parameter name twice
    (a SyntaxError) and passed the wrong value to ``clamp_``.
    """
    for name, mod in model.named_modules():
        if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(F"""CLIP_GELU: {name} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
    """Expand each per-tensor weight amax into a per-channel amax vector.

    SCREAMING_SNAKE_CASE: the model whose weight quantizers are updated in place.
    """
    # Bug fix: the body referenced an undefined name ``model``.
    model = SCREAMING_SNAKE_CASE
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            # Bug fix: the expanded tensor was assigned to a throwaway local and
            # never written back to the quantizer.
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    """Recalibrate weight amax values from the current weights.

    SCREAMING_SNAKE_CASE: the model whose weight quantizers are updated in place.
    """
    # Bug fix: the body referenced an undefined name ``model``.
    model = SCREAMING_SNAKE_CASE
    for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ):
            # Bug fix: the attribute is ``_weight_quantizer`` (the bare name does
            # not exist) and the warning must be an f-string.
            if not hasattr(mod._weight_quantizer , '_amax' ):
                print(F"""RECALIB: {name} WARNING: NO AMAX BUFFER""" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            # Bug fix: ``keepdims`` must be a boolean, not the model object.
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(F"""RECALIB: {name} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
            # Bug fix: write the recalibrated amax back to the quantizer (it was
            # assigned to a throwaway local).
            mod._weight_quantizer._amax = amax
def __magic_name__ ( model , name_width=25 , line_width=180 , ignore=None ) -> Optional[int]:
    """Log each weighted module's input/weight quantizer configuration.

    Bug fix: the original definition repeated one parameter name four times
    (a SyntaxError) and passed wrong defaults to ``getattr``/``isinstance``.
    ``ignore`` may hold module types and/or name substrings to skip.
    """
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    # Widen the name column to the longest module name actually present.
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , 'weight' ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , '_input_quantizer' , None )
        weight_q = getattr(mod , '_weight_quantizer' , None )
        if not hasattr(mod , 'weight' ):
            continue
        if type(mod ) in ignore:
            continue
        # Skip modules whose name contains any string entry of ``ignore``.
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = F"""Act:{input_q.extra_repr()}"""
        wgt_str = F"""Wgt:{weight_q.extra_repr()}"""
        s = F"""{name:{name_width}} {act_str} {wgt_str}"""
        if len(s ) <= line_width:
            logger.info(s )
        else:
            # Too long for one line: split name+activation and weight lines.
            logger.info(F"""{name:{name_width}} {act_str}""" )
            logger.info(F"""{' ':{name_width}} {wgt_str}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Dict:
    """Print every TensorQuantizer in the model plus a total count.

    SCREAMING_SNAKE_CASE: the model to inspect.
    """
    # Bug fix: the body referenced an undefined name ``model``.
    model = SCREAMING_SNAKE_CASE
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(F"""{name:80} {mod}""" )
            count += 1
    print(F"""{count} TensorQuantizers found in model""" )
def __magic_name__ ( name , mod , quantizer , k , v ) -> Tuple:
    """Set attribute ``k`` to ``v`` on ``mod.<quantizer>`` if it exists.

    Bug fix: the original definition repeated one parameter name five times
    (a SyntaxError); restore the name/module/quantizer-attr/key/value split.
    """
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        # The attribute must already exist on the quantizer; a typo in ``k``
        # would otherwise silently create a new, unused attribute.
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(F"""{name} has no {quantizer}""" )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="both" , **SCREAMING_SNAKE_CASE ) -> Any:
_lowercase : Optional[Any] = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '_input_quantizer' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if which in ["weight", "both"]:
set_quantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , '_weight_quantizer' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
logger.info(SCREAMING_SNAKE_CASE )
def __magic_name__ ( model , names , **kwargs ) -> Tuple:
    """Apply quantizer attributes to every module whose name matches ``names``.

    ``names`` holds regex patterns; each matching quantized module has
    ``kwargs`` applied via ``set_quantizers``, and bare ``*_quantizer`` modules
    get the attributes set directly. Bug fix: duplicated parameter names
    (SyntaxError) and ``setattr``/``re.search`` argument mix-ups.
    """
    for name, mod in model.named_modules():
        if hasattr(mod , '_input_quantizer' ) or hasattr(mod , '_weight_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    s = F"""Warning: changing {name}"""
                    for k, v in kwargs.items():
                        s += F""" {k}={v}"""
                        setattr(mod , k , v )
                    logger.info(s )
| 66 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Bug fix: the renaming pass collapsed four distinct module constants onto the
# single rebound name ``__A``, so the canonical names the tokenizer class below
# reads (``VOCAB_FILES_NAMES`` etc.) were unbound. Bind both the obfuscated
# name (for backward compatibility) and the canonical alias.
__A : Optional[Any] = logging.get_logger(__name__)
logger = __A

__A : Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
VOCAB_FILES_NAMES = __A

__A : Tuple = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}
PRETRAINED_VOCAB_FILES_MAP = __A

__A : Dict = {
    "gpt-neox-20b": 2_048,
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = __A
class A_ (a_ ):
    """Fast GPT-NeoX tokenizer wrapper (obfuscated).

    NOTE(review): the obfuscation pass damaged this class — every ``__init__``
    parameter and several locals were renamed to the same placeholder, so the
    ``def`` headers below are SyntaxErrors (duplicate argument names) and the
    bodies reference names (``pre_tok_state``, ``add_prefix_space``,
    ``pre_tok_class``, ``input_ids``) that are never bound. The original
    parameter list must be reconstructed before this class can run.
    """

    # Class-level tokenizer configuration; all four were collapsed onto one
    # attribute name by the renaming pass (only the last binding survives).
    UpperCAmelCase__ = VOCAB_FILES_NAMES
    UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']

    def __init__( self , _A=None , _A=None , _A=None , _A="<|endoftext|>" , _A="<|endoftext|>" , _A="<|endoftext|>" , _A=False , **_A , ):
        """Build the fast tokenizer and sync ``add_prefix_space`` onto the
        backend pre-tokenizer state.

        NOTE(review): duplicate ``_A`` parameters — SyntaxError as written.
        """
        super().__init__(
            _A , _A , tokenizer_file=_A , unk_token=_A , bos_token=_A , eos_token=_A , add_prefix_space=_A , **_A , )
        # Reads the backend pre-tokenizer state to detect a prefix-space mismatch.
        UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
            UpperCAmelCase = getattr(_A , pre_tok_state.pop('''type''' ) )
            UpperCAmelCase = add_prefix_space
            UpperCAmelCase = pre_tok_class(**_A )
        UpperCAmelCase = add_prefix_space

    def _lowercase ( self , _A , _A = None ):
        """Save the tokenizer vocabulary files to a directory.

        NOTE(review): duplicate ``_A`` parameters — SyntaxError as written.
        """
        UpperCAmelCase = self._tokenizer.model.save(_A , name=_A )
        return tuple(_A )

    def _lowercase ( self , _A ):
        """Encode a ``Conversation`` into model input ids, appending EOS after
        each turn and truncating to ``model_max_length`` from the left."""
        UpperCAmelCase = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
        if len(_A ) > self.model_max_length:
            UpperCAmelCase = input_ids[-self.model_max_length :]
        return input_ids
| 130 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def A_ (resistance , reactance , impedance ):
    """Solve the electrical impedance relation Z**2 = R**2 + X**2.

    Exactly one argument must be 0; that quantity is computed from the other
    two and returned as a single-entry dict keyed by its name.

    Bug fix: the original definition used the same parameter name three times,
    which is a SyntaxError in Python.

    Raises:
        ValueError: if the number of zero-valued arguments is not exactly one.
    """
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("Exactly one argument must be 0" )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 705 |
"""simple docstring"""
import baseaa
def A_ (__a ):
    """Encode ``__a`` (a str) with the file-level ``baseaa`` codec, returning bytes.

    Bug fix: the body previously called ``.encode`` on an undefined name
    ``string`` instead of the parameter.
    """
    return baseaa.aaaencode(__a.encode("utf-8" ) )
def A_ (__a ):
    """Decode ``__a`` with the file-level ``baseaa`` codec and return a UTF-8 str."""
    raw_bytes = baseaa.aaadecode(__a )
    return raw_bytes.decode("utf-8" )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 482 | 0 |
def UpperCamelCase ( word : str , max_width : int ) -> list:
    """Fully justify ``word``'s whitespace-separated words to ``max_width`` columns.

    Returns a list of lines: every line except the last is padded to exactly
    ``max_width`` by distributing spaces left-to-right between words; the last
    line is left-justified.

    Bug fix: the original outer and inner definitions each repeated one
    parameter name (a SyntaxError in Python); distinct names are restored.
    """
    words = word.split()

    def justify(line : list , width : int , max_width : int ) -> str:
        # Total spaces this line still owes to reach max_width.
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " " )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )

    answer = []
    line = []
    width = 0
    for w in words:
        if width + len(w ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(w) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(w )
            width += len(w )
        else:
            # justify the line and add it to result
            answer.append(justify(line , width , max_width ) )
            # reset new line and new width
            line, width = [w], len(w )
    # The final line is left-justified and padded with trailing spaces.
    remaining_spaces = max_width - width - len(line )
    answer.append(" ".join(line ) + (remaining_spaces + 1) * " " )
    return answer
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 15 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s). Bug fix: the functions below read ``c`` and the
# symbols ``ct``/``x``/``y``/``z``, but the renaming pass collapsed all of
# them onto ``__UpperCAmelCase`` (a tuple-unpack onto one repeated name keeps
# only the last element). Bind the canonical names and keep the obfuscated
# bindings for backward compatibility.
c = 299_792_458
__UpperCAmelCase : Union[str, Any] = c

# Symbols
ct, x, y, z = symbols('ct x y z')
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = ct, x, y, z
def lowerCamelCase_ ( UpperCamelCase_ ):
    """Return the speed as a fraction of light speed (beta = v / c).

    NOTE(review): relies on a module-level speed-of-light constant ``c`` —
    confirm it is bound under that exact name at module scope.

    Raises:
        ValueError: if the speed exceeds c or is below 1.
    """
    # Bug fix: the body referenced an undefined name ``velocity``.
    velocity = UpperCamelCase_
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def lowerCamelCase_ ( UpperCamelCase_ ):
    """Return the Lorentz factor gamma = 1 / sqrt(1 - beta**2) for a velocity.

    NOTE(review): calls ``beta``, but this module's functions were all renamed
    to ``lowerCamelCase_`` by the obfuscation pass, so ``beta`` is unbound here
    — confirm/restore the intended function binding.
    """
    return 1 / sqrt(1 - beta(UpperCamelCase_ ) ** 2 )
def lowerCamelCase_ ( UpperCamelCase_ ):
    """Return the 4x4 Lorentz boost matrix along x for the given velocity.

    NOTE(review): calls ``gamma`` and ``beta``, but this module's functions
    were all renamed to ``lowerCamelCase_`` by the obfuscation pass, so both
    names are unbound here — confirm/restore the intended bindings.
    """
    return np.array(
        [
            [gamma(UpperCamelCase_ ), -gamma(UpperCamelCase_ ) * beta(UpperCamelCase_ ), 0, 0],
            [-gamma(UpperCamelCase_ ) * beta(UpperCamelCase_ ), gamma(UpperCamelCase_ ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ = None ):
    """Apply the Lorentz transformation to a four-vector event.

    NOTE(review): the obfuscation pass broke this function — the duplicated
    parameter name is a SyntaxError, the body reads an undefined ``event``
    (and the symbolic default built below is assigned to an unused local),
    and ``transformation_matrix``/``ct``/``x``/``y``/``z``/``c`` may be
    unbound at module scope. The original (velocity, event=None) signature
    must be reconstructed before this can run.
    """
    # Ensure event is not empty
    if event is None:
        _a : Dict = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(UpperCamelCase_ ) @ event
# Demo: run doctests, then print a symbolic transformed four-vector and a
# numerically substituted one.
# NOTE(review): the results below are assigned to the rebound obfuscated name
# ``__UpperCAmelCase`` while the prints read ``four_vector``/``sub_dict``/
# ``numerical_vector``, which are unbound as written — confirm/restore the
# original variable names.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Example of symbolic vector:
    __UpperCAmelCase : Union[str, Any] = transform(29_979_245)
    print('Example of four vector: ')
    print(f'''ct\' = {four_vector[0]}''')
    print(f'''x\' = {four_vector[1]}''')
    print(f'''y\' = {four_vector[2]}''')
    print(f'''z\' = {four_vector[3]}''')
    # Substitute symbols with numerical values
    __UpperCAmelCase : Tuple = {ct: c, x: 1, y: 1, z: 1}
    __UpperCAmelCase : str = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f'''\n{numerical_vector}''')
| 471 | 0 |
from math import pi, sqrt
def UpperCamelCase ( _A : float )-> float:
    """Compute the gamma function for a positive integer or half-integer.

    Uses the recurrence gamma(n) = (n - 1) * gamma(n - 1) with the base cases
    gamma(1) == 1 and gamma(0.5) == sqrt(pi).

    Bug fix: the body referenced undefined names (``num``, ``gamma`` and a
    leftover obfuscation placeholder) instead of the parameter, itself and pi.

    Raises:
        ValueError: if ``_A`` <= 0.
        OverflowError: if ``_A`` > 171.5 (float64 gamma overflows).
        NotImplementedError: if ``_A`` is not an integer or half-integer.
    """
    num = _A
    if num <= 0:
        raise ValueError("math domain error" )
    if num > 171.5:
        raise OverflowError("math range error" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * UpperCamelCase(num - 1 )
def UpperCamelCase ( )-> None:
    """Spot-check the gamma implementation at 0.5, 1 and 2.

    Bug fix: the expected value for gamma(0.5) was garbled to an undefined
    placeholder; it is sqrt(pi).
    NOTE(review): refers to ``gamma``, but this file binds its gamma
    implementation (and this self-test) under one clashing obfuscated name —
    confirm the intended binding before running.
    """
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive demo: keep prompting until the user enters 0 (falsy).
    # Bug fixes: the loop tested ``num`` before it was bound (input went to a
    # throwaway obfuscated name), dataset-table junk was fused onto the last
    # line, and the exit hint ran at import time outside the guard.
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        # NOTE(review): ``gamma`` must resolve to the gamma implementation
        # above — confirm the binding (the file reuses one obfuscated name).
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase :
    """Mixin with save/load round-trip tests for feature extractors.

    Concrete test classes provide ``feature_extraction_class`` and
    ``feat_extract_dict`` and are expected to also inherit from
    ``unittest.TestCase`` for the assert helpers.

    Bug fix: every local result and assertion argument had been replaced by an
    undefined placeholder name; the intended locals are restored.
    NOTE(review): all four test methods share the obfuscated name ``__A``, so
    only the last definition survives on the class — the original method names
    must be restored for the earlier tests to run.
    """

    # Placeholder the concrete test class overrides with its extractor class.
    lowerCAmelCase : int = None

    def __A ( self ):
        """to_json_string round-trip: every configured key must survive."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )

    def __A ( self ):
        """to_json_file / from_json_file round-trip preserves the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "feat_extract.json" )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )

        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def __A ( self ):
        """save_pretrained / from_pretrained round-trip preserves the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )

        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )

    def __A ( self ):
        """The feature extractor must be constructible with no arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
| 232 | 0 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the example script.
# NOTE(review): the renaming pass collapsed both constants onto the single
# name ``a``, so the first value (16) is immediately overwritten by 32 —
# confirm/restore the original distinct constant names.
a : int = 16
a : Dict = 32
def lowerCamelCase__ ( accelerator : Accelerator , batch_size : int = 16 ):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Bug fix: the original definition repeated one parameter name twice
    (a SyntaxError) and every local result (tokenizer, datasets, padding
    settings) was assigned to a throwaway name while the body read the
    original identifiers.
    """
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )

    return train_dataloader, eval_dataloader
# For testing only
# NOTE(review): upstream rebinds the dataloader factory to the mocked version
# here; the renaming pass redirected the assignment onto the constant ``a``
# instead of the factory function — confirm/restore the intended target name.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    a : List[Any] = mocked_dataloaders  # noqa: F811
def lowerCamelCase__ ( config , args ):
    """Train and evaluate BERT on GLUE MRPC with OOM-safe batch sizing.

    ``inner_training_loop`` is wrapped with ``find_executable_batch_size`` so
    the batch size is halved and the loop retried whenever CUDA runs out of
    memory. Bug fix: the original definition repeated one parameter name
    (a SyntaxError) and assigned every intermediate result to a throwaway
    name while the body read the real identifiers.
    """
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , None ) == "1":
        config["""num_epochs"""] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # NOTE(review): ``get_dataloaders`` must resolve to the dataloader
        # factory defined above (the file binds it under an obfuscated name).
        train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) , )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=predictions , references=references , )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"""epoch {epoch}:""" , eval_metric )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def lowerCamelCase__ ( ):
    """Parse command-line arguments and launch training.

    Bug fix: the parser and parsed results were assigned to a throwaway name
    while the body read ``parser``/``args``, and argparse's ``type``/``default``
    received placeholder objects instead of ``str``/``None``.
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    # NOTE(review): ``training_function`` must resolve to the training loop
    # defined above (the file binds it under an obfuscated name).
    training_function(config , args )
# Script entry point.
# NOTE(review): ``main`` is not bound under that name in this file — the entry
# point above was renamed by the obfuscation pass; confirm/restore the binding.
if __name__ == "__main__":
    main()
| 63 |
def lowerCamelCase__ ( sorted_collection , item ):
    """Iterative interpolation search over an ascending-sorted sequence.

    Returns the index of ``item`` or None when absent. Bug fix: the original
    definition repeated one parameter name twice (a SyntaxError) and every
    bound result was assigned to a throwaway name.
    """
    left = 0
    right = len(sorted_collection ) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            # Re-anchor the window when the probe fell outside it, otherwise
            # shrink toward the item like binary search.
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def lowerCamelCase__ ( sorted_collection , item , left , right ):
    """Recursive interpolation search between indices ``left`` and ``right``.

    Returns the index of ``item`` or None when absent. Bug fix: the original
    definition repeated one parameter name four times (a SyntaxError) and the
    recursive calls targeted an unbound name; recursion now uses this
    function's own binding.
    NOTE(review): later defs in this file reuse the same obfuscated name,
    which shadows this binding at module scope — confirm/restore the original
    function names.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        # Probe fell left of the window: re-anchor on [point, left].
        return lowerCamelCase__(sorted_collection , item , point , left )
    elif point > right:
        # Probe fell right of the window: re-anchor on [right, left].
        return lowerCamelCase__(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return lowerCamelCase__(
                sorted_collection , item , left , point - 1 )
        else:
            return lowerCamelCase__(
                sorted_collection , item , point + 1 , right )
def lowerCamelCase__ ( collection ):
    """Return True if ``collection`` is in ascending order, else raise.

    Bug fix: the body compared an undefined name against the sorted parameter;
    the parameter itself is now checked.

    Raises:
        ValueError: if the collection is not ascending sorted.
    """
    if collection != sorted(collection ):
        raise ValueError("""Collection must be ascending sorted""" )
    return True
# Demo driver: validate a sample collection is sorted, then search for 67.
# NOTE(review): the obfuscation pass broke this block — ``debug`` is read but
# the flag was assigned to ``a``, and ``collection``/``__assert_sorted``/
# ``interpolation_search``/``target`` are unbound because their definitions
# and assignments were renamed; confirm/restore the original names.
if __name__ == "__main__":
    import sys

    a : Optional[Any] = 0
    if debug == 1:
        a : Optional[Any] = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    a : Tuple = 67
    a : List[Any] = interpolation_search(collection, target)
    if result is not None:
        print(f"""{target} found at positions: {result}""")
    else:
        print("Not found")
| 63 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the (deprecated) TrajectoryTransformer model.
# Bug fixes: the torch-only symbol list previously clobbered the import
# structure dict (both were assigned to ``_A``), ``_LazyModule`` received an
# undefined ``_import_structure`` name, and the lazy module was assigned to a
# throwaway variable instead of replacing this module in ``sys.modules``.
_import_structure = {
    '''configuration_trajectory_transformer''': [
        '''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TrajectoryTransformerConfig''',
    ],
}
_A : Dict = _import_structure  # keep the original (obfuscated) alias

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling symbols for lazy loading too.
    _import_structure['''modeling_trajectory_transformer'''] = [
        '''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrajectoryTransformerModel''',
        '''TrajectoryTransformerPreTrainedModel''',
        '''load_tf_weights_in_trajectory_transformer''',
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    _A : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = _A
| 711 | '''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class _lowercase :
    """Markov-chain graph with per-node outgoing transition probabilities.

    NOTE(review): the obfuscation pass damaged this class — all four public
    methods share the name ``a`` (only the last binding survives), the third
    method's header repeats one parameter name (a SyntaxError), and every
    attribute write was turned into an assignment to a throwaway local, so
    ``self.connections`` is never actually created or populated. The original
    method names and ``self.connections[...]`` assignments must be restored.
    """

    def __init__( self : Tuple ) -> Any:
        """Initialize the (intended) ``self.connections`` adjacency mapping."""
        # NOTE(review): assigns a local instead of ``self.connections``.
        __lowerCAmelCase = {}

    def a ( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> None:
        """Add a node with an empty outgoing-transition mapping."""
        # NOTE(review): assigns a local instead of ``self.connections[node]``.
        __lowerCAmelCase = {}

    def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : float ) -> None:
        """Record a transition probability between two nodes, auto-adding them.

        NOTE(review): duplicate parameter names — SyntaxError as written; the
        body also reads unbound ``nodea``/``probability`` and calls the
        renamed-away ``self.add_node``.
        """
        if nodea not in self.connections:
            self.add_node(SCREAMING_SNAKE_CASE__ )
        if nodea not in self.connections:
            self.add_node(SCREAMING_SNAKE_CASE__ )
        __lowerCAmelCase = probability

    def a ( self : Union[str, Any] ) -> list[str]:
        """Return the list of known node names."""
        return list(self.connections )

    def a ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> str:
        """Sample the next node from the current node's transition distribution.

        NOTE(review): the accumulator and random draw are bound to throwaway
        locals while the loop reads ``current_probability``/``random_value``/
        ``node`` — the original local names must be restored.
        """
        __lowerCAmelCase = 0
        __lowerCAmelCase = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def UpperCamelCase_ ( snake_case_ : str , snake_case_ : list[tuple[str, str, float]] , snake_case_ : int ) -> dict[str, int]:
    """Run a Markov-chain random walk and count node visits.

    Intended behavior: build the graph from ``transitions``, start at
    ``start``, take the given number of steps, return a Counter of visits.
    NOTE(review): the obfuscation pass broke this function — the parameter
    name is repeated three times (a SyntaxError), and the body reads unbound
    names (``MarkovChainGraphUndirectedUnweighted``, ``transitions``,
    ``graph``, ``start``, ``visited``, ``node``) because the class, its
    methods and the locals were renamed; the original bindings must be
    restored.
    """
    __lowerCAmelCase = MarkovChainGraphUndirectedUnweighted()

    for nodea, nodea, probability in transitions:
        graph.add_transition_probability(snake_case_ , snake_case_ , snake_case_ )

    __lowerCAmelCase = Counter(graph.get_nodes() )
    __lowerCAmelCase = start

    for _ in range(snake_case_ ):
        __lowerCAmelCase = graph.transition(snake_case_ )
        visited[node] += 1

    return visited
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 330 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_A : Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names reconstructed from the attribute accesses in ``main``
    (``dataset_name``, ``train_dir``, ``train_val_split``, ...).
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Collect the train/validation folders (if given) into the mapping that
        # ``datasets.load_dataset`` expects; ``None`` means "use the hub dataset".
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train.

    Field names reconstructed from the attribute accesses in ``main``.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    # NOTE(review): the original default was anonymized; the upstream run_mae example
    # uses True here — confirm.
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """``TrainingArguments`` extended with a base learning rate that ``main`` scales by
    the total batch size (absolute_lr = base_lr * total_batch_size / 256)."""

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    """Stack the per-example ``pixel_values`` tensors into a single batch tensor.

    Args:
        examples: list of dicts, each holding a ``"pixel_values"`` tensor.

    Returns:
        A dict with the stacked ``"pixel_values"`` batch (shape ``(batch, ...)``).
    """
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    """Parse arguments, build the dataset/model/trainer and run ViT MAE pre-training."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Apply the MAE training transforms to every image in a batch of examples."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point used by xla_spawn (TPUs); ``index`` is the process index (unused)."""
    main()
if __name__ == "__main__":
    # NOTE(review): expects a module-level ``main`` entry point to be defined above.
    main()
| 100 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    """Builds tiny Falcon configs/inputs and runs shape checks for ``FalconModelTest``."""

    def __init__(
        self,
        parent,
        batch_size=3,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): ``is_decoder``/``new_decoder_architecture`` were anonymized in
        # the original; False/True match the upstream Falcon tester — confirm.
        return FalconConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=1,
            new_decoder_architecture=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Forward a ``FalconModel`` (with and without mask) and check the output shape."""
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Forward the model as a decoder with cross-attention inputs and check the output shape."""
        # NOTE(review): this flag assignment was anonymized in the original — confirm.
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Forward ``FalconForCausalLM`` with labels and check the logits shape."""
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Check that generating with a KV cache matches a full forward pass."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the format expected by ``ModelTesterMixin``."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation/pipeline tests for the Falcon architecture.

    Method names were anonymized in the original; they are restored to ``test_*``
    names (required for unittest discovery) following the upstream Falcon test file.
    """

    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): these two boolean switches were anonymized in the original;
    # the upstream Falcon test disables head masking and pruning — confirm.
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)

    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_rw_cache_conversion(self):
        """Round-trip the KV cache through the legacy RW format and back and compare."""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_past_key_values_format(self):
        """Falcon-specific override: verify the shape/layout of the returned KV cache."""
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    """Slow integration tests that actually generate text with Falcon checkpoints."""

    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        # Greedy decoding (do_sample=False) so the expected string is deterministic.
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI so we use tiny random models that resemble them
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI so we use tiny random models that resemble them
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 100 | 1 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch the Open Library JSON record for ``olid`` (e.g. ``"isbn/0140328726"`` or an author key).

    Raises:
        ValueError: if ``olid`` (after stripping surrounding whitespace and slashes)
            does not contain exactly one ``/``.
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a summary dict with human-readable keys.

    Author keys are resolved to names via ``get_openlibrary_data``, the first
    sentence is flattened to its ``value``, and any list values are joined with ", ".
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Interactive loop: look up ISBNs on Open Library until the user asks to stop.
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break

        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue

        print(f"\nSearching Open Library for ISBN: {isbn}...\n")

        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th convergent of e.

    The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: for i >= 2,
    the i-th coefficient is ``2 * i / 3`` when i is a multiple of 3, else 1.
    The numerators follow h_i = a_i * h_{i-1} + h_{i-2}.
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    # Bug fix: sum the digits of the convergent's numerator, not of the input max_n.
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
| 206 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

# Module-level logger (annotation corrected: ``getLogger`` returns a ``logging.Logger``).
# NOTE(review): the rest of the file is not visible here — confirm which name the
# script body uses for this logger.
_UpperCAmelCase : logging.Logger = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_:
    """Arguments pertaining to what data we are going to input our model for
    training and eval (TabFact table-entailment task).

    Restored from the obfuscated original: all fields had collapsed onto the
    single name ``UpperCamelCase_`` (each annotation overwrote the previous
    one), defaults referenced the undefined name ``snake_case__``, and the
    validation hook was misnamed so it never ran.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Runs automatically after dataclass __init__; validates that either a
        # dataset name or local train/validation files were supplied.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class lowerCAmelCase_:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune from.

    Restored from the obfuscated original: all fields had collapsed onto the
    single name ``UpperCamelCase_`` and defaults referenced the undefined
    name ``snake_case__``.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def lowerCAmelCase_ () -> None:
    """Fine-tune / evaluate / predict a TAPEX (BART) sequence classifier on TabFact.

    Restored from the obfuscated original: every local had been rebound to the
    single name ``lowerCAmelCase__`` while later statements still read the real
    names (``parser``, ``training_args``, ``raw_datasets``, ...), which raised
    NameError at runtime; dict/attribute assignments (``data_files["test"]``,
    ``model.config.label2id``, ``metrics["train_samples"]``) had been flattened
    to plain rebinds; ``fpaa`` stood in for ``fp16``.
    """
    # Parse model/data/training arguments from the command line, or from a
    # single JSON file when that is the only CLI argument.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either a dataset from the hub, or local CSV/JSON files
    # (`question` column for the input question, `table` column for the table).
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files; CSV/JSON training and
        # evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer (needs add_prefix_space for the underlying BPE).
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch.
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts: each example pairs a statement with a table
        # serialized as "col1#col2\nv1#v2\n...".
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # Takes an `EvalPrediction` and returns a dict of string to float.
    def compute_metrics(p):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def lowerCAmelCase_ (lowercase__ : List[Any] ) -> Optional[Any]:
    """TPU multiprocessing entry point; `xla_spawn` passes the process index.

    NOTE(review): `main` is not defined anywhere in this module — the training
    entry point above was renamed by obfuscation — so this call raises
    NameError at runtime. Confirm the intended target before shipping.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined in this module (the entry point above
    # carries an obfuscated name), so running this file as a script raises
    # NameError. Confirm the intended entry point.
    main()
| 668 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __lowerCamelCase:
    """Feature for translations with a fixed set of languages per example
    (stored as a PyArrow struct of one string field per language).

    Restored from the obfuscated original: all fields had collapsed onto the
    single name ``a__`` (each annotation overwrote the previous one, so
    ``self.languages`` could never resolve) and ``field(init=lowerCAmelCase,
    repr=lowerCAmelCase)`` referenced an undefined name.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage type: one string field per language, sorted for determinism.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        """Flatten the Translation feature into a dictionary of Value features."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class __lowerCamelCase:
    """Feature for translations with a variable set of languages per example.

    Restored from the obfuscated original: fields had collapsed onto ``a__``
    and three distinct methods (post-init normalization, example encoding,
    flattening) all shared one name, so only the last survived.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize to a sorted, de-duplicated language list (or None).
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Encode one example, accepting either a single string or a list of
        alternative translations per language."""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        """Flatten the feature into a dictionary of Sequence(Value) features."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 29 | 0 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase__ ( num , den ) -> bool:
    """Return True if num/den is a non-trivial digit-cancelling fraction
    (Project Euler 33), e.g. 49/98 where cancelling the 9s gives 4/8 = 49/98.

    Fixes: the original declared two parameters with the same obfuscated name
    (a SyntaxError), and raised ZeroDivisionError whenever ``den`` ended in 0.
    """
    # Guard the division below: a denominator ending in 0 can never be a
    # non-trivial cancelling fraction (and would divide by zero).
    if num == den or den % 10 == 0:
        return False
    # Last digit of num must equal first digit of den, and cancelling those
    # digits must leave the fraction's value unchanged.
    return num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
def UpperCAmelCase__ ( lowercase__ ) -> list[str]:
    """Return the non-trivial digit-cancelling fractions less than 1 whose
    numerator has at most ``digit_len`` digits and whose denominator is
    two-digit, formatted as "num/den" strings (Project Euler 33).

    Fixes: the original iterated ``range(fn_name, fn_name)`` (the function's
    own name passed as both bounds), read the undefined name ``digit_len``,
    and called an undefined helper. The cancelling test is inlined so the
    function is self-contained.

    >>> UpperCAmelCase__(2)
    ['16/64', '19/95', '26/65', '49/98']
    """
    solutions = []
    upper = int("1" + "0" * lowercase__)  # 10 ** digit_len
    for num in range(11, upper):
        # den > num keeps the fraction < 1 and excludes trivial num == den.
        for den in range(num + 1, 100):
            if num % 10 == den // 10 and den % 10 != 0:
                # Cancelling the shared digit must preserve the value.
                if (num // 10) / (den % 10) == num / den:
                    solutions.append(f"{num}/{den}")
    return solutions
def UpperCAmelCase__ ( lowercase__ = 2 ) -> int:
    """Project Euler 33: multiply the four non-trivial digit-cancelling
    fractions and return the denominator of the product in lowest terms (100).

    Fixes: the original passed the function's own name as arguments to
    ``Fraction`` and an undefined ``fraction_list``, and returned
    ``int(fn_name)``. Fraction generation is inlined so the function is
    self-contained, and exact ``Fraction`` arithmetic replaces the float
    product.
    """
    # Accumulates the product of den/num, i.e. the reciprocal of the product
    # of the cancelling fractions; the numerators cancel, leaving the answer.
    product = Fraction(1, 1)
    upper = int("1" + "0" * lowercase__)
    for num in range(11, upper):
        for den in range(num + 1, 100):
            if num % 10 == den // 10 and den % 10 != 0 and (num // 10) / (den % 10) == num / den:
                product *= Fraction(den, num)
    return int(product)
if __name__ == "__main__":
    # Fix: `solution` is never defined in this module; after all definitions
    # the solver is bound to `UpperCAmelCase__` (the last def wins).
    print(UpperCAmelCase__())
| 708 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import table for the CTRL model family. Restored from the obfuscated
# original, which read `_import_structure` at the bottom without ever defining
# it (every assignment had been collapsed onto one throwaway name) and never
# installed the lazy module into `sys.modules`.
#
# Always-importable symbols (configuration + slow tokenizer).
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

# PyTorch models are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

# TensorFlow models are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers and IDEs see the real symbols.
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 634 | 0 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

# Restored distinct constant names: the obfuscated file bound the logger and
# all three tables below to the single name `__snake_case`, so the tokenizer
# class that reads VOCAB_FILES_NAMES etc. could not resolve them.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Canonical hub locations of the tokenizer files per pretrained checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class lowercase_(PreTrainedTokenizerFast):
    """Fast CodeGen tokenizer (byte-level BPE backed by `tokenizers`), with
    optional truncation of generated completions at given patterns.

    Restored from the obfuscated original: the base class and class attributes
    had collapsed onto undefined/duplicate names (`_A`, five attributes all
    called `a_`), every `__init__` parameter shared one name (a SyntaxError),
    all five methods shared one name, and locals were rebound to a throwaway
    name while later statements read the real ones.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        # Rebuild the pre-tokenizer if its stored add_prefix_space flag
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens=False,
        clean_up_tokenization_spaces=None,
        truncate_before_pattern=None,
        **kwargs,
    ) -> str:
        """Decode ids to text, optionally truncating at the first occurrence
        of any regex in `truncate_before_pattern`."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Cut a generated completion at a second top-level `print`/`def`, or
        at the earliest match of any caller-supplied pattern."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 660 | '''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase_(unittest.TestCase):
    """Configuration holder used by the Levit image-processor tests below.

    Restored from the obfuscated original: every `__init__` parameter shared
    one name (a SyntaxError) and all values were bound to a single local
    instead of being stored on `self`, so `prepare_image_processor_dict`
    raised AttributeError. Mutable list defaults were also replaced by the
    None-sentinel idiom (same effective defaults).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        self.size = size if size is not None else {"shortest_edge": 18}
        self.crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_center_crop = do_center_crop
        self.do_normalize = do_normalize
        # None-sentinel instead of mutable list defaults; values unchanged.
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a LevitImageProcessor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class lowercase_(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for LevitImageProcessor (PIL, NumPy and PyTorch inputs).

    Restored from the obfuscated original: the mixin base was the undefined
    name `_A`, the required `image_processing_class` attribute was bound to
    `a_`, all eight methods shared one name (so only the last survived), and
    locals were rebound to a throwaway name while later statements read
    `image_processing`/`image_inputs`/`encoded_images`.
    """

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        # NOTE(review): relies on the tester class defined above in this file.
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 660 | 1 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCamelCase :Optional[Any] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
    """Configuration holder for the Pix2Struct image-processor tests.

    Stores the hyper-parameters the test classes below read (``batch_size``,
    ``num_channels``, ``max_patches``, ``patch_size``, ...) and provides the
    processor kwargs plus a deterministic dummy image.

    Fixes over the previous version: the ``__init__`` signature had every
    parameter named ``__UpperCamelCase`` (duplicate parameter names are a
    SyntaxError), the attribute stores were mangled to ``_a`` locals (so no
    attribute was ever set), both methods shared the name ``_A`` (the second
    shadowed the first), and ``prepare_dummy_image`` referenced the undefined
    name ``_A`` as the URL and as ``stream=``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        # Patch budgets the shape tests iterate over.
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        """Keyword arguments used to instantiate the processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Download a fixed reference image (network access) as an RGB PIL image."""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


# Alias under the name the sibling test classes actually instantiate; the bare
# class name is rebound by the next ``class UpperCAmelCase`` in this file.
PixaStructImageProcessingTester = UpperCAmelCase
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class UpperCAmelCase ( _a , unittest.TestCase ):
    """Pix2Struct image-processor tests for 3-channel (RGB) inputs.

    Covers property presence, a deterministic dummy-image expectation, and the
    flattened-patch output shape for PIL, numpy and torch inputs, with and
    without rendered header text (VQA mode).

    NOTE(review): identifiers look machine-mangled — every method is named
    ``_A`` (later definitions shadow earlier ones), results are bound to ``_a``
    but read back under other names, ``_A`` is also passed as an *argument*
    where it is undefined, and the first base class is ``_a``. As written the
    class cannot run; the comments below describe the intended behaviour —
    confirm against the upstream test file before relying on them.
    """

    # Image-processor class under test (None when vision deps are missing).
    a: int = PixaStructImageProcessor if is_vision_available() else None
    def _A ( self: Tuple ):
        # setUp: build the shared tester/config holder.
        _a = PixaStructImageProcessingTester(self )
    @property
    def _A ( self: List[Any] ):
        # Keyword arguments used to instantiate the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()
    def _A ( self: List[str] ):
        # The processor must expose its two configuration flags.
        _a = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_A , '''do_normalize''' ) )
        self.assertTrue(hasattr(_A , '''do_convert_rgb''' ) )
    def _A ( self: List[Any] ):
        # Regression check: mean of the flattened patches for a known image.
        _a = self.image_processor_tester.prepare_dummy_image()
        _a = self.image_processing_class(**self.image_processor_dict )
        _a = 2048
        _a = image_processor(_A , return_tensors='''pt''' , max_patches=_A )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1E-3 , rtol=1E-3 ) )
    def _A ( self: int ):
        # Shape check for random PIL inputs at every max_patches budget.
        # Initialize image_processor
        _a = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , Image.Image )
        # Test not batched input
        # Expected feature width: one (patch_h * patch_w) slab per channel plus
        # 2 positional-id columns.
        _a = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _a = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _a = image_processor(
                _A , return_tensors='''pt''' , max_patches=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def _A ( self: int ):
        # Same as above but in VQA mode: calling without ``header_text`` must
        # raise, with it must produce the same shapes.
        # Initialize image_processor
        _a = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , Image.Image )
        # Test not batched input
        _a = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        _a = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(_A ):
                _a = image_processor(
                    image_inputs[0] , return_tensors='''pt''' , max_patches=_A ).flattened_patches
            _a = '''Hello'''
            _a = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_A , header_text=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _a = image_processor(
                _A , return_tensors='''pt''' , max_patches=_A , header_text=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def _A ( self: List[Any] ):
        # Shape check for random numpy inputs.
        # Initialize image_processor
        _a = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , np.ndarray )
        _a = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _a = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _a = image_processor(
                _A , return_tensors='''pt''' , max_patches=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def _A ( self: Optional[Any] ):
        # Shape check for random torch-tensor inputs.
        # Initialize image_processor
        _a = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , torch.Tensor )
        # Test not batched input
        _a = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _a = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _a = image_processor(
                _A , return_tensors='''pt''' , max_patches=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class UpperCAmelCase ( _a , unittest.TestCase ):
    """Pix2Struct image-processor tests for 4-channel (RGBA) inputs.

    The expected patch width uses ``num_channels - 1`` because the processor is
    expected to convert RGBA inputs to RGB before flattening.

    NOTE(review): same identifier mangling as the 3-channel variant above
    (methods all named ``_A``, values bound to ``_a`` but read under other
    names, ``_A`` used as an undefined argument, base class ``_a``) — verify
    against the upstream test before relying on the comments.
    """

    # Image-processor class under test (None when vision deps are missing).
    a: List[Any] = PixaStructImageProcessor if is_vision_available() else None
    def _A ( self: int ):
        # setUp: 4-channel tester; the second binding is presumably the
        # expected channel count after RGBA->RGB conversion.
        _a = PixaStructImageProcessingTester(self , num_channels=4 )
        _a = 3
    @property
    def _A ( self: Optional[Any] ):
        # Keyword arguments used to instantiate the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()
    def _A ( self: int ):
        # The processor must expose its two configuration flags.
        _a = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_A , '''do_normalize''' ) )
        self.assertTrue(hasattr(_A , '''do_convert_rgb''' ) )
    def _A ( self: Any ):
        # Shape check for random 4-channel PIL inputs at each patch budget.
        # Initialize image_processor
        _a = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A )
        for image in image_inputs:
            self.assertIsInstance(_A , Image.Image )
        # Test not batched input
        # (num_channels - 1): the alpha channel is dropped by do_convert_rgb.
        _a = (
            (self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _a = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _a = image_processor(
                _A , return_tensors='''pt''' , max_patches=_A ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 714 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class UpperCAmelCase :
    """Builds BertGeneration configs and inputs for the encoder/decoder tests.

    NOTE(review): this class looks machine-mangled — every method is named
    ``_A`` (later definitions shadow earlier ones), every parameter of the
    multi-argument methods is named ``__UpperCamelCase`` (duplicate parameter
    names are a SyntaxError in Python), and results are bound to ``_a`` but
    read back under descriptive names. The comments below describe the
    *intended* behaviour reconstructed from the right-hand sides; restore the
    real identifiers from the upstream test before running.
    """

    def __init__( self: Tuple , __UpperCamelCase: List[str] , __UpperCamelCase: Optional[int]=13 , __UpperCamelCase: List[str]=7 , __UpperCamelCase: List[str]=True , __UpperCamelCase: Union[str, Any]=True , __UpperCamelCase: Optional[int]=99 , __UpperCamelCase: Optional[Any]=32 , __UpperCamelCase: str=5 , __UpperCamelCase: Any=4 , __UpperCamelCase: Any=37 , __UpperCamelCase: List[Any]="gelu" , __UpperCamelCase: Union[str, Any]=0.1 , __UpperCamelCase: str=0.1 , __UpperCamelCase: Optional[Any]=50 , __UpperCamelCase: Optional[int]=0.0_2 , __UpperCamelCase: str=True , __UpperCamelCase: str=None , ):
        # Intended: store parent plus the model hyper-parameters listed below.
        _a = parent
        _a = batch_size
        _a = seq_length
        _a = is_training
        _a = use_input_mask
        _a = vocab_size
        _a = hidden_size
        _a = num_hidden_layers
        _a = num_attention_heads
        _a = intermediate_size
        _a = hidden_act
        _a = hidden_dropout_prob
        _a = attention_probs_dropout_prob
        _a = max_position_embeddings
        _a = initializer_range
        _a = use_labels
        _a = scope
    def _A ( self: List[str] ):
        # prepare_config_and_inputs: random ids, optional mask, optional labels.
        _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _a = None
        if self.use_input_mask:
            _a = random_attention_mask([self.batch_size, self.seq_length] )
        if self.use_labels:
            _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _a = self.get_config()
        return config, input_ids, input_mask, token_labels
    def _A ( self: Tuple ):
        # get_config: decoder-mode BertGenerationConfig from the stored sizes.
        return BertGenerationConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
    def _A ( self: Any ):
        # prepare_config_and_inputs_for_decoder: adds encoder states + mask.
        (
            (
                _a
            ) , (
                _a
            ) , (
                _a
            ) , (
                _a
            ) ,
        ) = self.prepare_config_and_inputs()
        _a = True
        _a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def _A ( self: Any , __UpperCamelCase: Tuple , __UpperCamelCase: Optional[Any] , __UpperCamelCase: Dict , __UpperCamelCase: int , **__UpperCamelCase: Optional[Any] , ):
        # create_and_check_model: forward with/without mask; check hidden shape.
        _a = BertGenerationEncoder(config=__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        _a = model(__UpperCamelCase , attention_mask=__UpperCamelCase )
        _a = model(__UpperCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _A ( self: List[Any] , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: str , __UpperCamelCase: Tuple , __UpperCamelCase: int , __UpperCamelCase: List[Any] , __UpperCamelCase: Any , **__UpperCamelCase: int , ):
        # create_and_check_model_as_decoder: cross-attention with encoder states.
        _a = True
        _a = BertGenerationEncoder(config=__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        _a = model(
            __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , )
        _a = model(
            __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _A ( self: Union[str, Any] , __UpperCamelCase: str , __UpperCamelCase: Optional[Any] , __UpperCamelCase: Union[str, Any] , __UpperCamelCase: Dict , __UpperCamelCase: Tuple , __UpperCamelCase: Tuple , **__UpperCamelCase: Tuple , ):
        # create_and_check_decoder_model_past_large_inputs: outputs computed with
        # a KV cache must match a full re-computation on a random slice.
        _a = True
        _a = True
        _a = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
        # first forward pass
        _a = model(
            __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , )
        _a = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        _a = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        _a = torch.cat([input_ids, next_tokens] , dim=-1 )
        _a = torch.cat([input_mask, next_mask] , dim=-1 )
        _a = model(
            __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )['''hidden_states'''][0]
        _a = model(
            __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )['''hidden_states'''][0]
        # select random slice
        _a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        _a = output_from_no_past[:, -3:, random_slice_idx].detach()
        _a = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-3 ) )
    def _A ( self: int , __UpperCamelCase: List[Any] , __UpperCamelCase: str , __UpperCamelCase: int , __UpperCamelCase: Optional[Any] , *__UpperCamelCase: Any , ):
        # create_and_check_for_causal_lm: LM-head logits shape check.
        _a = BertGenerationDecoder(__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        _a = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _A ( self: Union[str, Any] ):
        # prepare_config_and_inputs_for_common: (config, inputs_dict) for mixins.
        _a , _a , _a , _a = self.prepare_config_and_inputs()
        _a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
    """Model/generation/pipeline test suite for BertGeneration.

    NOTE(review): mangled identifiers — the three mixin bases are all the
    undefined name ``__snake_case``, the three class attributes are all named
    ``a`` (each assignment shadows the previous), and every method is named
    ``_A``. Compare with the upstream test file (``all_model_classes`` /
    ``all_generative_model_classes`` / ``pipeline_model_mapping`` and
    individually named test methods) before relying on the comments below.
    """

    # Intended: all_model_classes
    a: str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    # Intended: all_generative_model_classes
    a: Optional[Any] = (BertGenerationDecoder,) if is_torch_available() else ()
    # Intended: pipeline_model_mapping
    a: int = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )
    def _A ( self: Union[str, Any] ):
        # setUp: model tester + config tester.
        _a = BertGenerationEncoderTester(self )
        _a = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
    def _A ( self: Tuple ):
        # Standard config sanity checks.
        self.config_tester.run_common_tests()
    def _A ( self: Union[str, Any] ):
        # Plain encoder forward-pass check.
        _a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCamelCase )
    def _A ( self: Union[str, Any] ):
        # Re-run the model check with model_type overridden to "bert".
        _a , _a , _a , _a = self.model_tester.prepare_config_and_inputs()
        _a = '''bert'''
        self.model_tester.create_and_check_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    def _A ( self: List[str] ):
        # Encoder used as a cross-attending decoder.
        _a = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase )
    def _A ( self: str ):
        # KV-cache consistency check for the decoder.
        _a = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase )
    def _A ( self: Optional[Any] ):
        # This regression test was failing with PyTorch < 1.3
        (
            (
                _a
            ) , (
                _a
            ) , (
                _a
            ) , (
                _a
            ) , (
                _a
            ) , (
                _a
            ) ,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        # Run the decoder check with the encoder attention mask dropped.
        _a = None
        self.model_tester.create_and_check_model_as_decoder(
            __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
    def _A ( self: List[Any] ):
        # Causal-LM head check.
        _a = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase )
    @slow
    def _A ( self: Tuple ):
        # The reference checkpoint must be downloadable and loadable.
        _a = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
        self.assertIsNotNone(__UpperCamelCase )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    """Integration test: run the pretrained BertGeneration encoder on a fixed
    input and compare the output shape and a 3x3 slice against reference
    values.

    Fixes over the previous version: values were bound to ``_a`` but read back
    as ``model``/``output``, and ``__UpperCamelCase`` (undefined in this scope)
    was passed as an argument — the method raised NameError before reaching any
    assertion. Coherent local names are restored; the checkpoint, input ids and
    reference numbers are unchanged.
    """

    @slow
    def _A ( self: List[Any] ):
        # Downloads the checkpoint — hence @slow.
        model = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , expected_shape )
        # Reference slice of the last hidden state (first 3 tokens x 3 dims).
        expected_slice = torch.tensor(
            [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
    """Integration test: run the pretrained BertGeneration decoder head on a
    fixed input and compare the logits shape and a 3x3 slice against reference
    values.

    Fixes over the previous version: values were bound to ``_a`` but read back
    as ``model``/``output``, and ``__UpperCamelCase`` (undefined in this scope)
    was passed as an argument — the method raised NameError before reaching any
    assertion. Coherent local names are restored; the checkpoint, input ids and
    reference numbers are unchanged.
    """

    @slow
    def _A ( self: List[str] ):
        # Downloads the checkpoint — hence @slow.
        model = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size([1, 8, 5_0358] )
        self.assertEqual(output.shape , expected_shape )
        # Reference slice of the LM logits (first 3 tokens x 3 vocab entries).
        expected_slice = torch.tensor(
            [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 346 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowerCAmelCase( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """
    Text decoder for caption generation: an (optionally bottlenecked) linear
    projection maps a prefix embedding into GPT-2's embedding space, and a
    GPT-2 LM head generates token ids from it via beam search.

    Fixes over the previous version:
      * ``__init__`` had every parameter named ``_lowerCamelCase`` (duplicate
        parameter names are a SyntaxError) and all five methods were named
        ``_a`` (each definition shadowed the previous) — identifiers restored
        from the values they carry.
      * The base classes were the undefined name ``UpperCAmelCase_``; the
        mixins imported at the top of this file are used instead.
      * The ``ValueError`` message interpolated ``prefix_hidden_dim`` (always
        ``None`` on that path) while labelling it ``prefix_inner_dim``.
      * ``get_dummy_token`` used the non-existent dtype ``torch.intaa``
        (AttributeError); ``torch.int64`` is the dtype required for labels.
      * Dataset residue fused onto the final ``return`` line was removed.
    """

    # Checkpoint weight names the loader may ignore (GPT-2 attention biases);
    # restored to the attribute name the ModelMixin loading machinery reads
    # (it was mangled to a dead class attribute named ``a``).
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            # Report the dimension that actually mismatches (prefix_inner_dim),
            # not prefix_hidden_dim, which is always None on this branch.
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal." )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        # Optional bottleneck around the prefix; identity when no hidden dim is set.
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPTaConfig(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPTaLMHeadModel(gpt_config )

    def forward( self , input_ids , prefix_embeds , attention_mask = None , labels = None , ):
        """Run the LM on ``[projected prefix ; token embeddings]``.

        When ``labels`` is given, dummy (zero) labels are prepended for the
        prefix positions so the loss aligns with the concatenated sequence.
        Returns ``(lm_output, bottleneck_hidden)`` when a hidden dim is
        configured, otherwise just the LM output.
        """
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token( self , batch_size , device ):
        """Zero labels for the prefix positions (int64, as labels require)."""
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )

    def encode( self , prefix ):
        """Project a prefix into the bottleneck space."""
        return self.encode_prefix(prefix )

    @torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ):
        """Generate one caption per feature row via beam search; returns the
        best beam's ``(tokens, seq_lengths)`` stacked over features."""
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam( self , input_ids=None , input_embeds=None , device=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        """Beam search over the GPT-2 head, seeded from ``input_embeds`` (or the
        embeddings of ``input_ids``). Returns beams sorted by average
        log-probability, best first, together with their lengths."""
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )

        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()

            if scores is None:
                # First step: seed `beam_size` beams from the top-k tokens.
                scores, next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens, scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Finished beams may only "extend" with token 0 at zero cost.
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                # Rank candidate continuations by length-normalised score.
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase ):
    """SpeechT5-style feature extractor: raw waveforms for the encoder input
    and log-mel spectrograms for decoder/vocoder targets.

    NOTE(review): identifiers are machine-mangled — every parameter of
    ``__init__``/``__call__``/the processing helper is named ``snake_case``
    (duplicate parameter names are a SyntaxError), attribute stores were
    rewritten to ``a__`` locals (so no attribute is ever set), methods are all
    named ``_snake_case`` (later definitions shadow earlier ones), and the base
    class is the undefined ``_UpperCamelCase``. The docstrings/comments below
    describe the intended behaviour reconstructed from the right-hand sides;
    restore the real identifiers from the upstream SpeechT5 feature extractor.
    """

    # Intended: model_input_names.
    _UpperCamelCase : Tuple = ["""input_values""", """attention_mask"""]
    def __init__( self , snake_case = 1 , snake_case = 16_000 , snake_case = 0.0 , snake_case = False , snake_case = 80 , snake_case = 16 , snake_case = 64 , snake_case = "hann_window" , snake_case = 1.0 , snake_case = 80 , snake_case = 7_600 , snake_case = 1E-10 , snake_case = 2 , snake_case = True , **snake_case , ) -> Dict:
        """Store mel/STFT configuration and precompute the analysis window and
        the slaney-normalised mel filter bank."""
        super().__init__(feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , **snake_case )
        a__ : Any = do_normalize
        a__ : List[str] = return_attention_mask
        a__ : List[Any] = num_mel_bins
        a__ : List[str] = hop_length
        a__ : int = win_length
        a__ : List[Any] = win_function
        a__ : List[str] = frame_signal_scale
        a__ : List[Any] = fmin
        a__ : Optional[Any] = fmax
        a__ : Union[str, Any] = mel_floor
        a__ : Union[str, Any] = reduction_factor
        # Window/hop sizes converted from milliseconds to samples, then the
        # FFT length and resulting number of frequency bins.
        a__ : List[str] = win_length * sampling_rate // 1_000
        a__ : List[Any] = hop_length * sampling_rate // 1_000
        a__ : List[Any] = optimal_fft_length(self.sample_size )
        a__ : Dict = (self.n_fft // 2) + 1
        a__ : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=snake_case )
        a__ : Tuple = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
        # Both arguments below are deprecated no-ops kept for config compat.
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , snake_case , )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def _snake_case ( snake_case , snake_case , snake_case = 0.0 ) -> List[np.ndarray]:
        """Per-utterance zero-mean/unit-variance normalisation; positions past
        each sequence's true length (per attention_mask) get padding_value."""
        if attention_mask is not None:
            a__ : Tuple = np.array(snake_case , np.intaa )
            a__ : List[str] = []
            for vector, length in zip(snake_case , attention_mask.sum(-1 ) ):
                # Normalise using statistics of the unpadded prefix only.
                a__ : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
                if length < normed_slice.shape[0]:
                    a__ : List[str] = padding_value
                normed_input_values.append(snake_case )
        else:
            a__ : Any = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
        return normed_input_values
    def _snake_case ( self , snake_case , ) -> np.ndarray:
        """Compute a (frames, num_mel_bins) log10-mel spectrogram of one waveform."""
        a__ : str = spectrogram(
            snake_case , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
        return log_mel_spec.T
    def __call__( self , snake_case = None , snake_case = None , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
        """Featurize `audio` (waveform inputs) and/or `audio_target`
        (spectrogram labels). When both are given, the target values and mask
        are attached to the main BatchFeature as decoder fields."""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values." )
        if sampling_rate is not None:
            # Refuse inputs recorded at a different rate than the model expects.
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    F""" {self.sampling_rate} and not {sampling_rate}.""" )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        if audio is not None:
            a__ : Dict = self._process_audio(
                snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , )
        else:
            a__ : Optional[int] = None
        if audio_target is not None:
            # is_target=True: produce mel-spectrogram features, not waveforms.
            a__ : List[Any] = self._process_audio(
                snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case , )
            if inputs is None:
                return inputs_target
            else:
                a__ : Tuple = inputs_target["input_values"]
                a__ : Tuple = inputs_target.get("attention_mask" )
                if decoder_attention_mask is not None:
                    a__ : Tuple = decoder_attention_mask
        return inputs
    def _snake_case ( self , snake_case , snake_case = False , snake_case = False , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = None , **snake_case , ) -> BatchFeature:
        """Shared waveform/target path: coerce to float32 numpy, optionally
        extract mel features (targets), pad, and normalise."""
        a__ : Optional[int] = isinstance(snake_case , np.ndarray ) and len(speech.shape ) > 1
        if is_batched_numpy and len(speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        a__ : List[Any] = is_batched_numpy or (
            isinstance(snake_case , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            a__ : int = [np.asarray(snake_case , dtype=np.floataa ) for speech in speech]
        elif not is_batched and not isinstance(snake_case , np.ndarray ):
            a__ : Any = np.asarray(snake_case , dtype=np.floataa )
        elif isinstance(snake_case , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
            a__ : List[Any] = speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            a__ : Union[str, Any] = [speech]
        # needed to make pad() work on spectrogram inputs
        a__ : Optional[Any] = self.feature_size
        # convert into correct format for padding
        if is_target:
            a__ : List[str] = [self._extract_mel_features(snake_case ) for waveform in speech]
            a__ : Optional[Any] = BatchFeature({"input_values": features} )
            a__ : str = self.num_mel_bins
        else:
            a__ : int = BatchFeature({"input_values": speech} )
        a__ : int = self.pad(
            snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , **snake_case , )
        a__ : Any = feature_size_hack
        # convert input values to correct format
        a__ : Tuple = padded_inputs["input_values"]
        if not isinstance(input_values[0] , np.ndarray ):
            a__ : int = [np.asarray(snake_case , dtype=np.floataa ) for array in input_values]
        elif (
            not isinstance(snake_case , np.ndarray )
            and isinstance(input_values[0] , np.ndarray )
            and input_values[0].dtype is np.dtype(np.floataa )
        ):
            a__ : Union[str, Any] = [array.astype(np.floataa ) for array in input_values]
        elif isinstance(snake_case , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
            a__ : Optional[int] = input_values.astype(np.floataa )
        # convert attention_mask to correct format
        a__ : Optional[int] = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            a__ : Tuple = [np.asarray(snake_case , dtype=np.intaa ) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            # Only pass the mask when padding actually happened; otherwise the
            # per-utterance statistics already cover the full arrays.
            a__ : Any = (
                attention_mask
                if self._get_padding_strategies(snake_case , max_length=snake_case ) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            a__ : Optional[Any] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"] , attention_mask=snake_case , padding_value=self.padding_value )
        if return_tensors is not None:
            a__ : int = padded_inputs.convert_to_tensors(snake_case )
        return padded_inputs
    def _snake_case ( self ) -> Dict[str, Any]:
        """Serialise the config, dropping attributes derived in __init__."""
        a__ : int = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        a__ : str = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
| 112 | 0 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
_lowercase = logging.get_logger(__name__)
# Fix: the three module-level constants were all bound to the same name, so
# each assignment clobbered the previous one and the class-level references
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) were undefined.

# File names under which the tokenizer's vocabulary is serialized.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Download locations of the vocabulary files for each published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (size of the positional embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}
class _UpperCamelCase ( PreTrainedTokenizerFast ):
    """
    Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers*
    library), based on byte-level Byte-Pair-Encoding.

    Fixes in this revision:
    - the base class was an undefined name; it is the imported
      ``PreTrainedTokenizerFast``,
    - the five class attributes all shared one name (each clobbered the
      previous) and are restored to the names the fast-tokenizer machinery
      reads,
    - every method shared the name ``_UpperCAmelCase`` (only the last
      survived) and duplicate parameter names made the ``def`` lines a
      SyntaxError; real parameter and local names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        # Re-build the backend pre-tokenizer if its serialized state disagrees
        # with the requested `add_prefix_space`.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer model files to `save_directory` and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """
        Decode `token_ids` to a string, optionally truncating the result at the
        first occurrence of any regex in `truncate_before_pattern`.
        """
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion: str, truncate_before_pattern: List[str]) -> str:
        """
        Cut `completion` at the second top-level `print`/`def`, then at the
        earliest match of any pattern in `truncate_before_pattern`.
        """

        def find_re(string, pattern, start_pos):
            # Position of the first match at/after start_pos, or -1 if absent.
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
| 717 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase :
    """
    Helper that builds a tiny ViTMAE configuration and random inputs for the
    TF model tests.

    Fixes in this revision: ``__init__`` assigned every argument to a local
    ``A`` instead of the ``self.*`` attributes the other methods read, and all
    methods shared one name; the method names the sibling test class calls
    (``prepare_config_and_inputs`` etc.) are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """
    TF ViTMAE model tests.

    Fixes in this revision: the mixin bases were undefined names (restored to
    the imported ``TFModelTesterMixin``/``PipelineTesterMixin``), the class
    attributes all shared one name, every method was named ``_UpperCAmelCase``
    (so only the last definition survived), and locals were assigned to ``A``
    while later lines read the real names; coherent names are restored
    throughout.
    """

    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)

            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            # Convert every tensor input into an equivalent numpy array.
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def _lowerCAmelCase ():
    """
    Load the standard COCO cats fixture image used by the integration tests.

    Fix: the opened image was bound to a throwaway name while the function
    returned the undefined name ``image``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    """
    Slow integration test running a pretrained TF ViTMAE checkpoint on a real
    image. Fix: locals were assigned to ``A`` while later lines read the real
    names, and the cached property the test reads
    (``self.default_image_processor``) had a mangled method name.
    """

    @cached_property
    def default_image_processor(self):
        # Image processor matching the checkpoint under test, or None when
        # vision dependencies are unavailable.
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 546 | 0 |
from sklearn.metrics import recall_score
import datasets
_lowercase : Tuple ="\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
_lowercase : List[str] ="\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. 
Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, 
average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
_lowercase : int ="\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ (datasets.Metric ):
    """
    Recall metric backed by scikit-learn's `recall_score`.

    Fixes in this revision: the two methods had mangled names instead of the
    ``_info``/``_compute`` hooks `datasets.Metric` dispatches to, ``_compute``
    had duplicate parameter names (a SyntaxError), and the score was bound to
    a throwaway name while the return line read the undefined name ``score``.
    """

    def _info(self):
        # Feature schema depends on the config: multilabel inputs are
        # sequences of ints, otherwise single ints per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """Compute recall; returns a float for averaged scores, else a per-class array."""
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 136 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__ (TestCasePlus ):
    """
    Slow end-to-end fine-tuning test for a tiny BERT2BERT encoder-decoder.

    Fixes in this revision: the base class was the undefined name ``A__``
    (restored to the imported ``TestCasePlus``), and locals were assigned to
    throwaway names while subsequent lines read the real names (``bert2bert``,
    ``tokenizer``, ``train_dataset``, ``batch_size``, ...); in particular the
    mapping closure never wrote its results back into ``batch``.
    """

    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # Mask padding positions so they are ignored by the loss.
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
| 136 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """
    Fast tests for the Stable Diffusion SAG pipeline.

    Fixes in this revision: the mixin bases were undefined names (restored to
    the imported ``PipelineLatentTesterMixin``/``PipelineTesterMixin``), the
    class attributes all shared one name, and locals were assigned to
    throwaway names while the components dict read ``unet``/``vae``/etc.
    """

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny randomly-initialized set of pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs for the pipeline under test."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    """
    Slow GPU integration tests for the Stable Diffusion SAG pipeline.

    Fixes in this revision: locals were assigned to throwaway names while the
    following lines read the real names (``sag_pipe``, ``prompt``, ``output``,
    ``image_slice``, ``expected_slice``), and the device/``disable`` arguments
    referenced an unrelated module-level name instead of ``torch_device``/None.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 701 |
import os
# Precomputes a list of the 100 first triangular numbers
# Fix: the constant is named TRIANGULAR_NUMBERS (the name the function below
# reads), and the function is named `solution` (the name the __main__ guard
# calls); previously both names were undefined.
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]


def solution() -> int:
    """
    Project Euler problem 42: count the "triangle words" in words.txt.

    A word's value is the sum of its letters' alphabetical positions; the word
    is a triangle word when that value is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    # Strip the surrounding quotes and split the single CSV line into words.
    words = [word.strip("\"") for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


# Backward-compatible alias for the previous (mangled) public name.
lowercase_ = solution

if __name__ == "__main__":
    print(solution())
| 294 | 0 |
"""Morse code encoder/decoder based on the ITU-R M.1677-1 alphabet."""

# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

# Morse token -> character, for decoding.
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Return *message* as space-separated Morse tokens (case-insensitive)."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode space-separated Morse tokens back into upper-case text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Round-trip a demo message through encrypt/decrypt, printing each step."""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
"""Find the smallest partition value where the proportion of perfect
partitions drops below a given threshold (Project Euler style problem)."""

import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when *positive_integer* admits a "perfect" partition.

    The test is whether log2(sqrt(4*p + 1)/2 + 1/2) is an integer.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    # A perfect partition corresponds to an integer exponent.
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition value at which the ratio of perfect
    partitions to total partitions falls below *max_proportion*.

    Args:
        max_proportion: Threshold in (0, 1]; smaller values run longer.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        # Candidate partition value; it only counts when it is an integer.
        partition_candidate = (integer**2 - 1) / 4
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
# Module logger (transformers' `...utils.logging` wrapper, imported above).
# Named `logger` so the statement below does not clobber it, per repo convention.
logger = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL for the public Longformer models.
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}
class A_ ( UpperCAmelCase ):
    """Longformer-style model configuration.

    Stores the encoder hyper-parameters (attention window, hidden sizes,
    dropout rates, special-token ids, ...) and forwards the padding token id
    to the base-config constructor.

    NOTE(review): the constructor parameters were anonymised to `__A`, so the
    right-hand names in the body (`attention_window`, `vocab_size`, ...) record
    the intended parameter list; as written the body raises NameError. The
    positional defaults (512, 2, 1, 0, 2, 30522, 768, 12, 12, 3072, "gelu",
    0.1, 0.1, 512, 2, 0.02, 1e-12, False) match that order -- confirm against
    the upstream file before restoring the signature.
    """

    # Model-type tag used by the auto-config machinery.
    SCREAMING_SNAKE_CASE_ : int = '''longformer'''

    def __init__( self : Optional[int] ,__A : Union[List[int], int] = 512 ,__A : int = 2 ,__A : int = 1 ,__A : int = 0 ,__A : int = 2 ,__A : int = 3_0522 ,__A : int = 768 ,__A : int = 12 ,__A : int = 12 ,__A : int = 3072 ,__A : str = "gelu" ,__A : float = 0.1 ,__A : float = 0.1 ,__A : int = 512 ,__A : int = 2 ,__A : float = 0.02 ,__A : float = 1e-12 ,__A : bool = False ,**__A : Any ,) -> Optional[Any]:
        super().__init__(pad_token_id=__A ,**__A )
        # Per-layer (or global) sliding attention window size.
        _lowercase = attention_window
        # Special-token ids.
        _lowercase = sep_token_id
        _lowercase = bos_token_id
        _lowercase = eos_token_id
        # Transformer encoder dimensions.
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = hidden_act
        _lowercase = intermediate_size
        # Regularisation.
        _lowercase = hidden_dropout_prob
        _lowercase = attention_probs_dropout_prob
        _lowercase = max_position_embeddings
        _lowercase = type_vocab_size
        _lowercase = initializer_range
        _lowercase = layer_norm_eps
        # Whether the model is being prepared for ONNX export.
        _lowercase = onnx_export
class A_ ( UpperCAmelCase ):
    """ONNX export configuration for the Longformer-style model above.

    Declares the dynamic input/output axes, the validation tolerance, the
    minimum opset, and builds dummy inputs (including the extra
    `global_attention_mask`) for tracing.
    """

    def __init__( self : Union[str, Any] ,__A : "PretrainedConfig" ,__A : str = "default" ,__A : "List[PatchingSpec]" = None ) -> Tuple:
        super().__init__(__A ,__A ,__A )
        # NOTE(review): assignment target was anonymised; upstream this flags
        # the wrapped config for ONNX export (`self._config.onnx_export = True`)
        # -- confirm against the original file.
        _lowercase = True

    @property
    def __UpperCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
        # Input axes: multiple-choice adds a `choice` dimension between batch
        # and sequence; all other tasks use (batch, sequence).
        if self.task == "multiple-choice":
            _lowercase = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            _lowercase = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('global_attention_mask', dynamic_axis),
            ] )

    @property
    def __UpperCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
        # Start from the parent's outputs; for the default task only the batch
        # axis is dynamic.
        _lowercase = super().outputs
        if self.task == "default":
            _lowercase = {0: 'batch'}
        return outputs

    @property
    def __UpperCAmelCase ( self : Dict ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    @property
    def __UpperCAmelCase ( self : int ) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset ,14 )

    def __UpperCAmelCase ( self : Union[str, Any] ,__A : "PreTrainedTokenizerBase" ,__A : int = -1 ,__A : int = -1 ,__A : bool = False ,__A : Optional[TensorType] = None ,) -> Mapping[str, Any]:
        # Build the standard dummy inputs, then append a global-attention mask.
        _lowercase = super().generate_dummy_inputs(
            preprocessor=__A ,batch_size=__A ,seq_length=__A ,is_pair=__A ,framework=__A )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        _lowercase = torch.zeros_like(inputs['input_ids'] )
        # make every second token global
        _lowercase = 1
        return inputs
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class A_ ( unittest.TestCase ):
    """Helper that builds RobertaPreLayerNorm configs and random inputs for
    the Flax model tests.

    NOTE(review): sibling code instantiates `FlaxRobertaPreLayerNormModelTester`,
    which this class appears to be; its anonymised name and the three methods
    all sharing the name `__UpperCAmelCase` (so later defs shadow earlier ones)
    should be restored from the upstream test file.
    """

    def __init__( self : List[Any] ,__A : int ,__A : str=13 ,__A : str=7 ,__A : Optional[Any]=True ,__A : Optional[int]=True ,__A : int=True ,__A : List[str]=True ,__A : Dict=99 ,__A : Dict=32 ,__A : Dict=5 ,__A : Dict=4 ,__A : int=37 ,__A : Any="gelu" ,__A : int=0.1 ,__A : int=0.1 ,__A : Dict=512 ,__A : List[str]=16 ,__A : Tuple=2 ,__A : Dict=0.02 ,__A : int=4 ,) -> Optional[int]:
        # NOTE(review): parameters were anonymised to `__A`; the right-hand
        # names below record the intended parameter list (parent, batch_size,
        # seq_length, ...) -- as written these lines raise NameError.
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = seq_length
        _lowercase = is_training
        _lowercase = use_attention_mask
        _lowercase = use_token_type_ids
        _lowercase = use_labels
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = intermediate_size
        _lowercase = hidden_act
        _lowercase = hidden_dropout_prob
        _lowercase = attention_probs_dropout_prob
        _lowercase = max_position_embeddings
        _lowercase = type_vocab_size
        _lowercase = type_sequence_label_size
        _lowercase = initializer_range
        _lowercase = num_choices

    def __UpperCAmelCase ( self : int ) -> str:
        # Build random ids/masks plus a fresh config (prepare_config_and_inputs).
        _lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        _lowercase = None
        if self.use_attention_mask:
            _lowercase = random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase = None
        if self.use_token_type_ids:
            _lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        _lowercase = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__A ,initializer_range=self.initializer_range ,)
        return config, input_ids, token_type_ids, attention_mask

    def __UpperCAmelCase ( self : Optional[int] ) -> Tuple:
        # Repackage config and inputs as the (config, inputs_dict) pair the
        # common test mixin expects.
        _lowercase = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase = config_and_inputs
        _lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
        # Decoder variant: also produce encoder hidden states and mask for
        # cross-attention tests.
        _lowercase = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase = config_and_inputs
        _lowercase = True
        _lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class A_ ( UpperCAmelCase , unittest.TestCase ):
    """Model-test suite for the Flax RobertaPreLayerNorm heads: wires the
    tester above into the shared Flax test mixin and checks that every head
    class loads from the pretrained PyTorch checkpoint.
    """

    # Mixin switch (presumably `test_head_masking` or similar -- anonymised).
    SCREAMING_SNAKE_CASE_ : Optional[int] = True
    # All head classes exercised by the common tests; empty when flax is absent.
    SCREAMING_SNAKE_CASE_ : Dict = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def __UpperCAmelCase ( self : List[Any] ) -> Any:
        # setUp: build the shared tester helper.
        _lowercase = FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def __UpperCAmelCase ( self : Dict ) -> Dict:
        # Every head class should load from the PyTorch checkpoint and run a
        # minimal forward pass.
        for model_class_name in self.all_model_classes:
            _lowercase = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' ,from_pt=__A )
            _lowercase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__A )
@require_flax
class A_ ( unittest.TestCase ):
    """Slow integration tests: run the pretrained Flax RobertaPreLayerNorm
    checkpoints on a fixed input and compare 3x3 output slices against
    hard-coded reference values.
    """

    @slow
    def __UpperCAmelCase ( self : int ) -> Optional[Any]:
        # Masked-LM head: check logits shape and a reference slice.
        _lowercase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' ,from_pt=__A )
        _lowercase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ,dtype=jnp.intaa )
        _lowercase = model(__A )[0]
        # Expected logits shape: (batch=1, seq_len=11, vocab=50265).
        _lowercase = [1, 11, 5_0265]
        self.assertEqual(list(output.shape ) ,__A )
        # compare the actual values for a slice.
        _lowercase = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] ,dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] ,__A ,atol=1e-4 ) )

    @slow
    def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        # Base model: compare the last hidden state slice only.
        _lowercase = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' ,from_pt=__A )
        _lowercase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] ,dtype=jnp.intaa )
        _lowercase = model(__A )[0]
        # compare the actual values for a slice.
        _lowercase = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] ,dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] ,__A ,atol=1e-4 ) )
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
# Module logger (transformers' `...utils.logging` wrapper, imported above).
# Named `logger` so the constant definitions below do not clobber it.
logger = logging.get_logger(__name__)
# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Checkpoint name -> hosted vocab/tokenizer URLs for the public REALM models.
# NOTE: the "aresolve"/"tokenizer.jsont" typos from the original were fixed;
# the Hub download endpoint is ".../resolve/main/<file>".
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum positional-embedding length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

# Tokenizer init kwargs per checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCAmelCase_ ( __lowerCamelCase ):
    """Fast (Rust-backed) tokenizer for REALM, built on PreTrainedTokenizerFast.

    Adds `batch_encode_candidates`, which encodes a batch of multiple candidate
    texts per example with fixed-length padding, as the REALM scorer expects.
    """

    # Class-level tokenizer metadata consumed by `from_pretrained`.
    # NOTE(review): the five attribute names were anonymised to the same
    # `__lowerCamelCase`, so only the last assignment survives; upstream these
    # are vocab_files_names, pretrained_vocab_files_map,
    # pretrained_init_configuration, max_model_input_sizes and
    # slow_tokenizer_class -- confirm before restoring.
    __lowerCamelCase = VOCAB_FILES_NAMES
    __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
    __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCamelCase = RealmTokenizer

    def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ):
        super().__init__(
            _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
        # Re-sync the backend normalizer if the requested casing/accent options
        # differ from what the serialized tokenizer was built with.
        UpperCAmelCase__ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , _lowerCAmelCase ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , _lowerCAmelCase ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , _lowerCAmelCase ) != tokenize_chinese_chars
        ):
            UpperCAmelCase__ : Any = getattr(_lowerCAmelCase , normalizer_state.pop("""type""" ) )
            UpperCAmelCase__ : str = do_lower_case
            UpperCAmelCase__ : Tuple = strip_accents
            UpperCAmelCase__ : Tuple = tokenize_chinese_chars
            # Rebuild the normalizer with the corrected options.
            UpperCAmelCase__ : Union[str, Any] = normalizer_class(**_lowerCAmelCase )
        UpperCAmelCase__ : Dict = do_lower_case

    def __UpperCAmelCase ( self , _lowerCAmelCase , **_lowerCAmelCase ):
        # batch_encode_candidates: encode `num_candidates` texts per example,
        # always padded to max_length so results can be stacked into one tensor.
        UpperCAmelCase__ : List[Any] = PaddingStrategy.MAX_LENGTH
        UpperCAmelCase__ : Optional[int] = text
        UpperCAmelCase__ : Optional[int] = kwargs.pop("""text_pair""" , _lowerCAmelCase )
        UpperCAmelCase__ : Optional[int] = kwargs.pop("""return_tensors""" , _lowerCAmelCase )
        UpperCAmelCase__ : Optional[Any] = {
            """input_ids""": [],
            """attention_mask""": [],
            """token_type_ids""": [],
        }
        # Encode each example's candidate list separately, then stack.
        for idx, candidate_text in enumerate(_lowerCAmelCase ):
            if batch_text_pair is not None:
                UpperCAmelCase__ : str = batch_text_pair[idx]
            else:
                UpperCAmelCase__ : Any = None
            UpperCAmelCase__ : str = super().__call__(_lowerCAmelCase , _lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
            UpperCAmelCase__ : Union[str, Any] = encoded_candidates.get("""input_ids""" )
            UpperCAmelCase__ : str = encoded_candidates.get("""attention_mask""" )
            UpperCAmelCase__ : Union[str, Any] = encoded_candidates.get("""token_type_ids""" )
            if encoded_input_ids is not None:
                output_data["input_ids"].append(_lowerCAmelCase )
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(_lowerCAmelCase )
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(_lowerCAmelCase )
        # Drop keys the underlying tokenizer did not produce.
        UpperCAmelCase__ : Union[str, Any] = {key: item for key, item in output_data.items() if len(_lowerCAmelCase ) != 0}
        return BatchEncoding(_lowerCAmelCase , tensor_type=_lowerCAmelCase )

    def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
        # build_inputs_with_special_tokens: [CLS] A [SEP] (B [SEP]) layout.
        UpperCAmelCase__ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
        # create_token_type_ids_from_sequences: 0s for the first segment
        # (incl. [CLS]/[SEP]), 1s for the optional second segment.
        UpperCAmelCase__ : Any = [self.sep_token_id]
        UpperCAmelCase__ : int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
        # save_vocabulary: delegate to the Rust tokenizer's model save.
        UpperCAmelCase__ : List[str] = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
        return tuple(_lowerCAmelCase )
| 79 |
"""Lazy-loading package entry point for the ChineseCLIP model family."""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Maps submodule name -> names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

# Vision-dependent exports are only registered when the vision extras exist.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

# Torch-dependent exports are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy submodules
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 193 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# Module logger used by the indexing pipeline below.
logger = logging.getLogger(__name__)
# Embedding computation is inference-only; disable autograd globally.
torch.set_grad_enabled(False)
# Run the DPR encoder on GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split *text* every *n*-th occurrence of *character*.

    Args:
        text: The document body to split.
        n: Number of `character`-separated chunks per passage (default 100 words).
        character: The separator to split on (default: a single space).

    Returns:
        A list of stripped passages of at most *n* chunks each.
    """
    parts = text.split(character)
    return [character.join(parts[i : i + n]).strip() for i in range(0, len(parts), n)]
def split_documents(documents: dict) -> dict:
    """Split each document of the batch into 100-word passages.

    Args:
        documents: Batched columns with parallel "title" and "text" lists
            (the format `datasets.Dataset.map(batched=True)` provides).

    Returns:
        A batch with one row per passage; missing titles become "", and rows
        with a None text are dropped.
    """
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute DPR embeddings for a batch of (title, text) passages.

    Args:
        documents: Batched "title"/"text" columns.
        ctx_encoder: DPR context encoder already moved to `device`.
        ctx_tokenizer: Matching fast tokenizer.

    Returns:
        A batch column {"embeddings": ndarray of shape (batch, hidden)} taken
        from the encoder's pooler output, detached and moved to CPU.
    """
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
    """Build a RAG knowledge dataset from a csv file and index it with Faiss."""
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    """Paths and model names for building and querying the knowledge dataset."""

    # Source csv (tab-separated, columns "title" and "text").
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    # Optional question to ask the finished RAG model.
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    # Falls back to a TemporaryDirectory in the __main__ guard when None.
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    """Knobs for the passage-splitting and embedding steps."""

    # None means `datasets` runs the split in a single process.
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    """Parameters of the Faiss HNSW index built over the embeddings."""

    # Must match the DPR encoder's hidden size (768 for the base models).
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Fall back to a scratch directory when no output_dir was given.
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 701 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
# Shared RNG used when callers do not provide their own.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats in [0, scale).

    Args:
        shape: Pair (rows, cols) giving the dimensions of the result.
        scale: Upper bound multiplier for each value.
        rng: Optional `random.Random`; defaults to the module-level
            `global_rng` (never a mutable default argument).
        name: Unused; kept for signature parity with the other test helpers.

    Returns:
        A list of `shape[0]` lists, each holding `shape[1]` floats.
    """
    if rng is None:
        rng = global_rng

    values = []
    for _ in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    """Builds config dicts and synthetic audio inputs for the TVLT
    feature-extractor tests.

    Named to match its instantiation in the test-case `setUp`; the two
    helpers carry the names the shared feature-extraction test mixin calls.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive input lengths so the batch spans
        # [min_seq_length, max_seq_length) evenly.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        """Return the kwargs for constructing a TvltFeatureExtractor."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Generate a batch of random 2-D speech inputs.

        Args:
            equal_length: When True, every input has max_seq_length rows;
                otherwise lengths increase across the batch.
            numpify: When True, convert each input to a numpy array.
        """
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((length, self.feature_size))
                for length in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class A(_UpperCAmelCase, unittest.TestCase):
    """Unit tests for ``TvltFeatureExtractor``.

    NOTE(review): every method in the original carried the same mangled name,
    so only the last definition survived and unittest discovered zero tests.
    The ``test_*`` / ``setUp`` names below are restored per unittest
    convention (``_load_datasamples`` is grounded by its call in
    ``test_integration``); confirm against the upstream test module.
    """

    # Grounded by ``self.feature_extraction_class`` reads below.
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        # The tester supplies config kwargs / synthetic inputs for the mixin.
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        # mel filter banks are float arrays — compare numerically, the rest exactly.
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking (mask_audio flag was mangled in the original;
        # presumably True — TODO confirm against upstream)
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        """Return the first ``num_samples`` LibriSpeech dummy waveforms as arrays."""
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        # assertEquals is a deprecated alias — use assertEqual.
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 586 | 0 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_(unittest.TestCase):
    """Checks that an optimizer wrapped by ``accelerator.prepare`` stays picklable.

    NOTE(review): the original method name was mangled (and not ``test_*``-
    prefixed, so unittest never ran it); name restored per convention —
    confirm against upstream.
    """

    def test_accelerated_optimizer_pickling(self):
        # The original bound every value to a throwaway local, leaving
        # `model.parameters()` and the prepare() argument undefined.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            # A prepared (wrapped) optimizer must survive a pickle round-trip.
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset global accelerator state so other tests start clean.
        AcceleratorState._reset_state()
| 66 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    # Train an LSTM to predict the next `forward_days` values of a series
    # from the previous `look_back` values.
    # NOTE(review): the original assigned every value to one mangled name,
    # so `x_train`/`y_train`/`x_test` used by model.fit/predict were never
    # bound; variable names restored from those references.
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    # Scale values into [0, 1] before feeding the LSTM.
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    # Keep `look_back` overlap so the first test window has full history.
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Sliding windows: X = look_back past points, y = forward_days future points.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
| 215 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE:
    """Binary tree node holding a coin count (fields restored: the traversal
    below reads ``node.data`` / ``node.left`` / ``node.right``)."""

    data: int
    left: "SCREAMING_SNAKE_CASE | None" = None
    right: "SCREAMING_SNAKE_CASE | None" = None


# Readable alias so annotations below resolve; keeps the mangled class usable too.
TreeNode = SCREAMING_SNAKE_CASE

CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
# Preserve the previous (mangled) module-level name for backward compatibility.
__UpperCAmelCase = CoinsDistribResult


def lowerCAmelCase_(root: "TreeNode | None") -> int:
    """Return the minimum number of moves to give every node exactly one coin
    (LeetCode 979 "Distribute Coins in Binary Trees"-style problem).

    Raises ValueError if the total coin count differs from the node count.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: "TreeNode | None") -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: "TreeNode | None") -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: "TreeNode | None") -> CoinsDistribResult:
        # excess = coins this subtree keeps beyond what it needs (1 per node);
        # an empty child reports excess 1 so its parent sends it nothing.
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__UpperCAmelCase = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE:
    """RAG tokenizer: pairs a question-encoder tokenizer with a generator
    tokenizer and delegates to whichever is current.

    NOTE(review): every method in the original shared one mangled name, so
    only the last survived; names below are restored from the RagTokenizer
    API (``save_pretrained``/``from_pretrained``/``batch_decode``/``decode``/
    mode switchers) — confirm against upstream.
    """

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # Input mode by default: encode questions until switched to targets.
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both sub-tokenizers into subfolders of ``save_directory``."""
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-tokenizers from the subfolders written by save_pretrained."""
        # dynamically import AutoTokenizer (avoids a circular import at module load)
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        # Delegate to whichever tokenizer is active (input vs target mode).
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ):
        """Deprecated helper that tokenizes sources and (optionally) targets."""
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def _UpperCamelCase(
    *args,
    take_from: Optional[Union[Dict, Any]] = None,
    standard_warn: bool = True,
    stacklevel: int = 2,
):
    """Emit deprecation warnings for attributes/kwargs and pop their values.

    Each positional argument is a ``(attribute, version_name, message)`` tuple.
    ``take_from`` may be a kwargs dict (values are popped) or an object
    (values are read via ``getattr``); ``None`` just warns.

    Returns the collected value(s): none -> None, one -> the value itself,
    several -> a tuple.

    Raises ValueError if the current diffusers version already reached
    ``version_name``, and TypeError if unexpected kwargs remain in ``take_from``.

    NOTE(review): the original signature reused one mangled parameter name
    for every keyword (a SyntaxError); names restored from the body's reads
    (``take_from``) and the upstream `deprecate` utility.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # A single un-nested tuple was passed positionally — normalize to a 1-tuple.
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            # The original passed a mangled name as the warning category;
            # deprecations conventionally use FutureWarning.
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        # Report the caller's location for the stray keyword argument.
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 24 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    """Generate an RSA key pair and write it to rsa_pubkey.txt / rsa_privkey.txt.

    NOTE(review): all three functions in the original were mangled to the
    same name, leaving `main`/`make_key_files`/`generate_key` undefined at
    their call sites; names restored from those calls.
    """
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> "tuple[tuple[int, int], tuple[int, int]]":
    """Return ((n, e), (n, d)) — an RSA public/private key pair of ``key_size`` bits."""
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        # e must be coprime with phi(n) for a modular inverse d to exist.
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if they exist."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
| 607 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__(ProcessorMixin):
    """Processor combining a LayoutLMv2 image processor with a LayoutXLM tokenizer.

    NOTE(review): the base class and the three class attributes below were
    mangled (the base name was undefined; ProcessorMixin is the class this
    file imports). Attribute and method names are restored per the
    ProcessorMixin contract and the upstream LayoutXLM processor
    (``get_overflowing_images`` is grounded by its call in ``__call__``) —
    confirm against upstream.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the legacy kwarg as a fallback for the new name.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (optionally OCR) then the tokenizer, and merge
        the pixel values into the tokenizer's encoding."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 281 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class lowerCAmelCase__(HashTable):
    """Hash table whose slots chain colliding values in a deque.

    NOTE(review): the base class name was mangled/undefined — ``HashTable``
    is what this file imports. Method names (all mangled to one identifier)
    and the broken assignment targets were restored from the upstream
    separate-chaining implementation; confirm against callers.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Lazily create the per-slot deque, then push the new value on the left.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        # presumably mirrors the slot into the base table's key map — TODO confirm
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        # Average remaining capacity per slot, scaled by the charge factor.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # Chain into the current slot unless it is full and no empty slot remains
        # (NOTE(review): the count() argument was mangled; None per upstream — confirm).
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 281 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Build train/validation/test ``tf.data.Dataset`` objects from CSV files.

    Returns ``(train_ds, val_ds, test_ds, label2id)``; each dataset is ``None``
    when its file was not provided.

    NOTE(review): this function and ``main`` below were both mangled to the
    same name (so ``get_tfds``/``main``/``logger`` and the argument dataclasses
    were undefined at their use sites); names restored from those references.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # Remove the label column; the remaining column(s) are the text inputs.
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        # Single-sentence task.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        # Sentence-pair task.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # NOTE(review): the integer dtypes were mangled ("intaa"); int32 inputs /
    # int64 labels per the upstream script — confirm.
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments describing the data files and preprocessing."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pointing at the pretrained model/config/tokenizer to use."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    """Fine-tune / evaluate a TF sequence-classification model on CSV data."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Accuracy over argmax predictions.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results


if __name__ == "__main__":
    main()
| 205 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    """Convert a TensorFlow Funnel checkpoint into a PyTorch state dict.

    Args:
        tf_checkpoint_path: Path to the TF checkpoint to load weights from.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: Destination path for the saved PyTorch weights.
        base_model: If truthy, build the decoder-less ``FunnelBaseModel``.
    """
    # Original defined this as `lowercase(...)` with four identically named
    # parameters (a SyntaxError); renamed to match the __main__ call site.
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # NOTE: the original annotated these module-level names with `Union`/`List`
    # from `typing` without importing it, which raises NameError at runtime.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
| 205 | 1 |
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list, item) -> bool:
    """Recursively search sorted *a_list* for *item*; return True iff found.

    Renamed from `__lowerCAmelCase` (which had two identically named
    parameters — a SyntaxError — and referenced undefined locals) to match
    both the recursive calls and the __main__ call site.
    """
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    # The original assigned every value to the same placeholder name
    # (`lowerCamelCase__`) while reading `user_input`/`sequence`/... below.
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f'{target} was {not_str}found in {sequence}')
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Return the first int(n_term) harmonic-series terms as strings.

    e.g. "3" -> ["1", "1/2", "1/3"]; an empty string yields an empty list.
    Renamed from `__lowerCAmelCase`, whose body read the undefined name
    `n_term` while its parameter was called `_UpperCamelCase`.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # first term is written "1", subsequent terms "1/k"
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    # The original assigned the user input to `lowerCamelCase__` but passed
    # the undefined name `nth_term` to harmonic_series below.
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE: the original assigned 'platform' to a stray variable instead of
    # setting this environment variable, so the comment above was a no-op.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default masks for a Blenderbot-Small test input dict.

    Any mask left as None is derived from the ids (non-pad positions) or set
    to all-ones for the head masks. Renamed from `_lowerCAmelCase`, which had
    eight identically named parameters (a SyntaxError).
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        # NOTE: the encoder attention_mask is reused for the decoder here,
        # matching the original (and upstream) behavior of this helper.
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    """Builds a tiny BlenderbotSmall config plus random inputs for the fast tests.

    Renamed from `__lowerCAmelCase` to match the reference in the model-test
    class's setUp; the original `__init__` had duplicate parameter names (a
    SyntaxError) and stored values in locals instead of on `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Return (config, inputs_dict) with random ids ending in EOS (2)."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        # append EOS so every sequence terminates properly
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Verify that decoding with a cache matches uncached decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as check_use_cache_forward, but with an explicit padded mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pad the attention mask out to the max decoder length
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Shape/behavior checks for the LM head and shift_tokens_right.

    The original class defined four methods under the same placeholder name
    (so only the last survived) and used `np.intaa`, which does not exist.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        """Build a tiny config and a batch of EOS-terminated input ids."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        # encoder and decoder sequences of different lengths
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        # shifting consumes exactly one pad token per batch
        self.assertEqual(n_pad_after, n_pad_before - 1)
        # every shifted sequence starts with the decoder start token (2)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Fast model tests driven by FlaxBlenderbotSmallModelTester.

    The original class inherited from the undefined placeholder `A__` (the
    mixins imported at the top of the file) and defined all methods under one
    duplicated name.
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        # JIT-compiled and eager encode must produce identically shaped outputs
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        # same comparison as test_encode, for the decoder side
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 283 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for IFInpaintingPipeline.

    The original class inherited from the undefined placeholder `A__` (the
    two mixins imported above), named every method identically (so only the
    last survived), and built its inputs dict from undefined locals.
    """

    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 319 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# The original assigned both the logger and this map to the same name
# (`_snake_case`), so the archive map silently clobbered the logger.
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'convnextv2'
def __init__( self, __a=3, __a=4, __a=4, __a=None, __a=None, __a="gelu", __a=0.02, __a=1E-12, __a=0.0, __a=224, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Dict = num_stages
_lowerCAmelCase : int = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
_lowerCAmelCase : List[Any] = [3, 3, 9, 3] if depths is None else depths
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : int = drop_path_rate
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : str = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
| 721 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf config from *config_path*; optionally pretty-print it.

    Renamed from `A` (every helper in this file was named `A`, shadowing one
    another) to match the call in load_vqgan below.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a config and load its checkpoint onto *device*.

    Falls back to the bundled vqgan_only config/checkpoint when paths are not
    given. Renamed from `A`; the original also had duplicate parameter names.
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under "state_dict"
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd  # free the CPU copy of the weights
    return model
def reconstruct_with_vqgan(x, model):
    """Round-trip *x* through the VQGAN encoder/decoder and return the result."""
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like "pkg.mod.Cls" to the named attribute.

    If *reload* is true, the containing module is re-imported first.
    Renamed from `A`; the original had duplicate parameter names.
    """
    module_name, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module_name)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module_name, package=None), cls)
def instantiate_from_config(config):
    """Instantiate the class named by config["target"] with config["params"].

    Raises KeyError when no target is given. Renamed from `A` to match its
    call site in load_model_from_config.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from *config*, optionally loading weights *sd*.

    Returns {"model": model}; moves to CUDA / switches to eval mode on
    request. Renamed from `A` to match the call in load_model.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    """Load a Lightning checkpoint (or a fresh model when *ckpt* is falsy).

    Returns (model, global_step); global_step is None without a checkpoint.
    Renamed from `A`; the original shadowed six other functions of the same
    name in this file.
    """
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 658 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import structure for the ConvNext package. The original assigned every
# sub-map to the same placeholder name (losing all but the base entry) and
# annotated module-level names with un-imported `typing` types.
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 148 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# The original assigned both names to `_UpperCamelCase` (shadowing the logger)
# and annotated them with `List`/`Dict` from the un-imported typing module,
# which raises NameError at import time.
SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    """Configuration for SEW speech models.

    The original class inherited from the undefined placeholder
    `__snake_case` (PretrainedConfig), declared every __init__ parameter as
    `A__` (a SyntaxError), and stored values into locals instead of `self`.
    """

    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # the three conv descriptions must describe the same number of layers
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        # total temporal downsampling factor of the conv feature extractor
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 206 | 0 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
lowercase__ ='\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
lowercase__ ='\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
lowercase__ ='\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a_(datasets.Metric):
    """F1 metric: thin wrapper around scikit-learn's f1_score.

    Fixes over the previous revision: the two methods had the same (mangled)
    name so one shadowed the other, `_compute` declared duplicate parameter
    names (a SyntaxError), and it returned `float(sample_weight)` while
    reading an undefined `score` local.
    """

    def _info(self):
        """Describe the metric's inputs, citation and reference docs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute the F1 score; returns a scalar float, or an array when average=None."""
        # NOTE(review): `fa_score` is presumed to be the module's alias for
        # sklearn.metrics.f1_score (import not visible here) — confirm.
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 511 |
'''simple docstring'''
def sum_of_series(first_term, common_diff, num_of_terms):
    """Return the sum of an arithmetic series.

    Uses the closed form S = n/2 * (2a + (n - 1) * d).

    >>> sum_of_series(1, 1, 10)
    55.0
    >>> sum_of_series(1, 10, 100)
    49600.0
    """
    # formula for sum of series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def UpperCamelCase_():
    """Demo entry point: print the sum of 1 + 2 + ... + 10."""
    # Previously this called `sum_of_series`, which did not exist because the
    # helper above had been renamed; the helper's original definition also
    # declared three parameters with the same name (a SyntaxError).
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 511 | 1 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
# UNet key-conversion tables used by convert_unet_state_dict() below.
# Every name here was previously assigned to the single mangled identifier
# `lowerCamelCase__`, so the `append` calls and the converter referenced
# undefined names; restored to the names those call sites expect.
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename HF-Diffusers UNet state-dict keys to original Stable Diffusion keys.

    Restored from the mangled revision: the function name now matches the
    call site in the `__main__` block, and the `mapping[k] = v` write-backs
    (lost when several assignments collapsed into one local) are reinstated.
    """
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    # top-level one-to-one renames
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    # resnet-internal renames apply only to resnet keys
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    # block-prefix renames apply to everything
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#

# VAE key-conversion tables used by convert_vae_state_dict() below; names
# restored to those the `append` calls and the converter actually reference.
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    """Reshape a 2-D HF linear weight to the 4-D (out, in, 1, 1) conv shape SD expects.

    Renamed from the mangled `UpperCamelCase` so the existing call in
    convert_vae_state_dict() resolves.
    """
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    """Rename HF-Diffusers VAE state-dict keys to original SD keys and reshape
    the mid-block attention linear weights into conv form.

    Restored from the mangled revision: function renamed to match its call
    site, lost `mapping[k] = v` / `new_state_dict[k] = ...` write-backs
    reinstated.
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    # attention-internal renames apply only to attention keys
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                # replacing values during iteration is safe; keys are unchanged
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#

# OpenCLIP <-> HF text-encoder key fragments; names restored to the ones the
# converters below read (`protected`, `textenc_pattern`, `code2idx`).
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
# escaped HF fragment -> SD fragment, and one regex that matches any HF fragment
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert a v2.x (OpenCLIP-style) HF text-encoder state dict to SD format.

    The split q/k/v projection weights and biases are captured per layer and
    concatenated back into the fused `in_proj_weight` / `in_proj_bias`
    tensors that the original checkpoint format uses.

    Restored from the mangled revision: function renamed to match its call
    site; the regex-replacement lambdas now name their match argument `m`
    (previously the body referenced an undefined `m`); the per-layer
    capture-list subscript assignments and `new_state_dict` writes are
    reinstated.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # single char 'q'/'k'/'v' at the start of the proj suffix
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    """v1 (CLIP) text-encoder keys need no remapping; return the dict unchanged.

    Renamed from the mangled `UpperCamelCase` so the call in the `__main__`
    block resolves.
    """
    return text_enc_dict
# Entry point: read a Diffusers model directory and write a single
# original-SD-format checkpoint (ckpt or safetensors). Every variable here was
# previously assigned to the mangled `lowerCamelCase__`, leaving the names the
# later statements read (unet_state_dict, is_vaa_model, ...) undefined.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if num/den is a non-trivial 'digit-cancelling' fraction.

    i.e. striking the shared digit (last of num, first of den) leaves an
    equal fraction, as in 49/98 == 4/8. Assumes den % 10 != 0 (the caller
    filters those out, since cancelling would divide by zero).
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """Return all digit-cancelling fractions "num/den" with two-digit denominators.

    Restored from the mangled revision, where `den` and `last_digit` had been
    collapsed into one name and all three functions shared one mangled name,
    so the in-file calls (`is_digit_cancelling`, `fraction_list`, `solution`)
    were unresolved.
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Project Euler 33: denominator of the product of the four curious
    fractions, in lowest terms (the answer is 100).
    """
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
| 238 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _a(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU, dummy-checkpoint) tests for ConsistencyModelPipeline.

    Restored from the mangled revision: the base class `lowerCAmelCase` was
    undefined (the file imports PipelineTesterMixin for exactly this use),
    all test methods shared one non-`test_*` name so unittest ran none of
    them, and locals/class attributes had collapsed into single mangled
    names. Attribute and helper names follow the `self.`/`return` use-sites
    that survived.
    """

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        """Tiny unconditional UNet checkpoint used by the fast tests."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        """Tiny class-conditional UNet checkpoint used by the fast tests."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Build the pipeline components dict (unet + scheduler)."""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs for the pipeline."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class _a(unittest.TestCase):
    """Slow (GPU, real-checkpoint) tests for ConsistencyModelPipeline.

    Restored from the mangled revision: method names were all `lowercase__`
    (shadowing each other, never discovered by unittest) and locals had
    collapsed into one name. Helper names follow the surviving
    `self.get_inputs` / `self.get_fixed_latents` call sites; the
    `torch.floataa` dtype placeholders are restored as float32 defaults and
    a float16 cast for the flash-attention tests — NOTE(review): confirm
    against the original test file.
    """

    def tearDown(self):
        # clean up GPU memory between tests
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """Build deterministic call kwargs, optionally with fixed latents."""
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        """Deterministic latents so fp16/flash-attn runs are reproducible."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]

        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 95 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    # sentencepiece is optional; without it the slow tokenizer is unavailable.
    FNetTokenizer = None

logger = logging.get_logger(__name__)

# Module constants below were all assigned to the single mangled name `__A`,
# leaving the names the tokenizer class reads undefined; restored.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

# SentencePiece's word-boundary marker character
SPIECE_UNDERLINE = "▁"
class _a(PreTrainedTokenizerFast):
    """Fast FNet tokenizer backed by HF `tokenizers`, built from an
    ALBERT-style SentencePiece model.

    Restored from the mangled revision: the base class `lowerCAmelCase` was
    undefined (the file imports PreTrainedTokenizerFast for this role), the
    `__init__` signature declared ten parameters with the same name (a
    SyntaxError), the three methods shared one non-API name, and the class
    attributes had collapsed into `UpperCamelCase__`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        # NOTE(review): lstrip/rstrip/normalized flags were lost in the mangled
        # revision; these values follow the standard FNet configuration — confirm.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Format: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for [CLS] A [SEP], 1 for B [SEP] when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model into `save_directory`; returns the path tuple."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 95 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Return the raw video bytes for an Instagram/IGTV post URL.

    Resolves the direct media URL through the downloadgram API, then fetches
    it. Renamed from the mangled `_a` so the call in the `__main__` block
    resolves. Timeouts added so a stalled server cannot hang the script
    forever (requests has no default timeout).
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url, timeout=30).json()[0]["urls"][0]["src"]
    return requests.get(video_url, timeout=30).content
# Script entry point: prompt for a URL, download, and save with a timestamped
# name. Previously `url` and `file_name` were assigned to the mangled
# `lowerCAmelCase__`, so the later reads were unresolved.
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 482 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class A__(unittest.TestCase):
    """Unit tests for diffusers' `get_activation` factory.

    Restored from the mangled revision: all four methods shared the name
    `_a` (each shadowed the previous, and none matched unittest's `test_*`
    discovery pattern) and the activation local had been mangled away.
    Checks for each name: correct module class, f(large negative) == 0,
    f(-1) != 0, f(0) == 0, and near-identity for large positive input.
    """

    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 482 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase_ = logging.getLogger(__name__)
def _UpperCAmelCase ( A , A ):
'''simple docstring'''
return (preds == labels).mean()
@dataclass
class snake_case_ :
    """Arguments pertaining to which model/config/tokenizer to fine-tune.

    Field names match how the rest of this script reads them
    (``model_args.model_name_or_path``, ``.config_name``, ``.tokenizer_name``,
    ``.cache_dir``).
    """

    # Required: model checkpoint path or hub identifier.
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
@dataclass
class snake_case_ :
    """Arguments pertaining to the data used for training and evaluation.

    Field names match how the rest of this script reads them
    (``data_args.task_name``, ``.data_dir``, ``.max_seq_length``,
    ``.overwrite_cache``).
    """

    # Required: one of the keys of the `processors` registry.
    task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
    data_dir: str = field(metadata={'help': 'Should contain the data files for the task.'} )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def _UpperCAmelCase ( ):
    '''Entry point: fine-tune and/or evaluate a multiple-choice model.

    Parses model/data/training arguments, loads the task processor, builds the
    model/tokenizer/datasets, trains with ``Trainer`` and writes eval results.

    NOTE(review): throughout this function the assignment targets appear
    machine-renamed to ``UpperCAmelCase__`` while later lines read the
    intended names (``parser``, ``training_args``, ``processor`` ...), and
    several call arguments were renamed to the bare ``A``; as written most
    statements would raise NameError. Also ``ModelArguments`` /
    ``DataTrainingArguments`` and ``simple_accuracy`` are not defined under
    those names in this module. Confirm against the upstream
    ``run_multiple_choice.py`` example before relying on this.
    '''
    UpperCAmelCase__ =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =parser.parse_args_into_dataclasses()
    # Refuse to overwrite a non-empty output directory unless explicitly asked.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            " --overwrite_output_dir to overcome." )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s" , A )
    # Set seed
    set_seed(training_args.seed )
    # Resolve the task processor and its label set.
    try:
        UpperCAmelCase__ =processors[data_args.task_name]()
        UpperCAmelCase__ =processor.get_labels()
        UpperCAmelCase__ =len(A )
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name) )
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    UpperCAmelCase__ =AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=A , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    UpperCAmelCase__ =AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    UpperCAmelCase__ =AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A , cache_dir=model_args.cache_dir , )
    # Get datasets
    UpperCAmelCase__ =(
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    UpperCAmelCase__ =(
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=A , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(A ) -> Dict:
        # Accuracy over argmax predictions.
        UpperCAmelCase__ =np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(A , p.label_ids )}
    # Data collator
    UpperCAmelCase__ =DataCollatorWithPadding(A , pad_to_multiple_of=8 ) if training_args.fpaa else None
    # Initialize our Trainer
    UpperCAmelCase__ =Trainer(
        model=A , args=A , train_dataset=A , eval_dataset=A , compute_metrics=A , data_collator=A , )
    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir )
    # Evaluation
    UpperCAmelCase__ ={}
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        UpperCAmelCase__ =trainer.evaluate()
        UpperCAmelCase__ =os.path.join(training_args.output_dir , "eval_results.txt" )
        if trainer.is_world_master():
            with open(A , "w" ) as writer:
                logger.info("***** Eval results *****" )
                for key, value in result.items():
                    logger.info("  %s = %s" , A , A )
                    writer.write("%s = %s\n" % (key, value) )
            results.update(A )
    return results
def _UpperCAmelCase ( A ):
    '''TPU (xla_spawn) entry point; the index argument ``A`` is required by
    the spawn API but unused.

    NOTE(review): ``main`` is not defined in this module — the training entry
    point above was renamed to ``_UpperCAmelCase`` (which this very definition
    shadows), so this call would raise NameError. Confirm the intended target.
    '''
    main()
if __name__ == "__main__":
    # NOTE(review): same concern as above — ``main`` is undefined here.
    main()
| 510 |
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
UpperCamelCase_ = [
'kernels/rwkv/wkv_cuda.cu',
'kernels/rwkv/wkv_op.cpp',
'kernels/deformable_detr/ms_deform_attn.h',
'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
'models/graphormer/algos_graphormer.pyx',
]
def _UpperCAmelCase ( A ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    # Inspect either the installed `transformers` package or a local build tree.
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
    if not _UpperCAmelCase(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
| 510 | 1 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class lowerCAmelCase_ ( IterableDataset ):
    """Iterable dataset that yields 0, 1, 2, ... and stops at a random point.

    After each yielded item it stops with probability ``p_stop``; it never
    yields more than ``max_length`` items. Used to exercise
    ``IterableDatasetShard`` with unpredictable per-epoch lengths.
    """

    def __init__( self , p_stop=0.01 , max_length=1000 ):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__( self ):
        count = 0
        stop = False
        # Stop either when the random coin says so or at max_length.
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class lowerCAmelCase_ ( unittest.TestCase ):
    """Unit tests for Accelerate's dataloader sharding helpers.

    Covers ``BatchSamplerShard`` (plain, ``split_batches``, ``even_batches``
    and their combination), ``IterableDatasetShard``, ``SkipBatchSampler`` /
    ``SkipDataLoader`` / ``skip_first_batches``, and the
    ``end_of_dataloader`` bookkeeping of ``DataLoaderShard`` and
    ``DataLoaderDispatcher``.

    NOTE(review): throughout this class the assignment targets appear
    machine-renamed to ``_A`` and many call arguments to ``_UpperCamelCase``,
    while later lines read the intended names (e.g. ``batch_sampler_shards``,
    ``batch_sampler``, ``expected``, ``RandomIterableDataset``); as written
    many statements would raise NameError. Confirm against the upstream
    Accelerate test suite before relying on behavior documented here.
    """
    def UpperCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=True )-> List[str]:
        # Helper: shard a batch sampler across 2 processes and compare each
        # shard's batches (and, when not splitting, lengths) to `expected`.
        _A = [
            BatchSamplerShard(_UpperCamelCase , 2 , _UpperCamelCase , split_batches=_UpperCamelCase , even_batches=_UpperCamelCase )
            for i in range(2 )
        ]
        _A = [list(_UpperCamelCase ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(_UpperCamelCase ) for shard in batch_sampler_shards] , [len(_UpperCamelCase ) for e in expected] )
        self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
    def UpperCamelCase ( self )-> Optional[int]:
        # Check the shards when the dataset is a round multiple of total batch size.
        _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
        _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCamelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
        _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
        _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
        _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
        # Check the shards when the dataset is very small.
        _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
        _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [[], []]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase )
    def UpperCamelCase ( self )-> Dict:
        # Same scenarios as above but with split_batches=True: each shard
        # receives half of every batch instead of alternating whole batches.
        # Check the shards when the dataset is a round multiple of batch size.
        _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase )
        _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCamelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size.
        _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase )
        _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase )
        _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase )
        # Check the shards when the dataset is very small.
        _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase )
        _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [[], []]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase )
    def UpperCamelCase ( self )-> List[str]:
        # even_batches=False: shards may end up with different numbers of
        # batches instead of padding by cycling through the dataset again.
        # Check the shards when the dataset is a round multiple of total batch size.
        _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
        _A = BatchSampler(range(24 ) , batch_size=3 , drop_last=_UpperCamelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
        _A = BatchSampler(range(21 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
        _A = BatchSampler(range(22 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
        _A = BatchSampler(range(20 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
        # Check the shards when the dataset is very small.
        _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [[[0, 1]], []]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
        _A = BatchSampler(range(2 ) , batch_size=3 , drop_last=_UpperCamelCase )
        _A = [[], []]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , even_batches=_UpperCamelCase )
    def UpperCamelCase ( self )-> Optional[int]:
        # split_batches=True combined with even_batches=False.
        # Check the shards when the dataset is a round multiple of batch size.
        _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase , even_batches=_UpperCamelCase )
        _A = BatchSampler(range(24 ) , batch_size=4 , drop_last=_UpperCamelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase , even_batches=_UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size.
        _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase , even_batches=_UpperCamelCase )
        _A = BatchSampler(range(22 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase , even_batches=_UpperCamelCase )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase , even_batches=_UpperCamelCase )
        _A = BatchSampler(range(21 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase , even_batches=_UpperCamelCase )
        # Check the shards when the dataset is very small.
        _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [[[0, 1]], []]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase , even_batches=_UpperCamelCase )
        _A = BatchSampler(range(2 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = [[], []]
        self.check_batch_sampler_shards(_UpperCamelCase , _UpperCamelCase , split_batches=_UpperCamelCase , even_batches=_UpperCamelCase )
    def UpperCamelCase ( self )-> Optional[int]:
        # An explicitly-given (uneven) list of batches sharded across 2 processes.
        _A = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        _A = [BatchSamplerShard(_UpperCamelCase , 2 , _UpperCamelCase , even_batches=_UpperCamelCase ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    def UpperCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False , _UpperCamelCase=2 , _UpperCamelCase=False )-> List[str]:
        # Helper: shard a (random-length) iterable dataset across processes and
        # verify all shards have equal length and interleave back to the
        # reference sequence (cycled when drop_last is False).
        random.seed(_UpperCamelCase )
        _A = list(_UpperCamelCase )
        _A = [
            IterableDatasetShard(
                _UpperCamelCase , batch_size=_UpperCamelCase , drop_last=_UpperCamelCase , num_processes=_UpperCamelCase , process_index=_UpperCamelCase , split_batches=_UpperCamelCase , )
            for i in range(_UpperCamelCase )
        ]
        _A = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(_UpperCamelCase )
            iterable_dataset_lists.append(list(_UpperCamelCase ) )
        _A = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        _A = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
        self.assertTrue(len(_UpperCamelCase ) % shard_batch_size == 0 )
        _A = []
        for idx in range(0 , len(_UpperCamelCase ) , _UpperCamelCase ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(_UpperCamelCase ) < len(_UpperCamelCase ):
                reference += reference
        self.assertListEqual(_UpperCamelCase , reference[: len(_UpperCamelCase )] )
    def UpperCamelCase ( self )-> Dict:
        _A = 42
        _A = RandomIterableDataset()
        self.check_iterable_dataset_shards(_UpperCamelCase , _UpperCamelCase , batch_size=4 , drop_last=_UpperCamelCase , split_batches=_UpperCamelCase )
        self.check_iterable_dataset_shards(_UpperCamelCase , _UpperCamelCase , batch_size=4 , drop_last=_UpperCamelCase , split_batches=_UpperCamelCase )
        self.check_iterable_dataset_shards(_UpperCamelCase , _UpperCamelCase , batch_size=4 , drop_last=_UpperCamelCase , split_batches=_UpperCamelCase )
        self.check_iterable_dataset_shards(_UpperCamelCase , _UpperCamelCase , batch_size=4 , drop_last=_UpperCamelCase , split_batches=_UpperCamelCase )
        # Edge case with a very small dataset
        _A = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(_UpperCamelCase , _UpperCamelCase , batch_size=4 , drop_last=_UpperCamelCase , split_batches=_UpperCamelCase )
        self.check_iterable_dataset_shards(_UpperCamelCase , _UpperCamelCase , batch_size=4 , drop_last=_UpperCamelCase , split_batches=_UpperCamelCase )
        self.check_iterable_dataset_shards(_UpperCamelCase , _UpperCamelCase , batch_size=4 , drop_last=_UpperCamelCase , split_batches=_UpperCamelCase )
        self.check_iterable_dataset_shards(_UpperCamelCase , _UpperCamelCase , batch_size=4 , drop_last=_UpperCamelCase , split_batches=_UpperCamelCase )
    def UpperCamelCase ( self )-> Tuple:
        # SkipBatchSampler should drop the first `skip_batches` batches.
        _A = BatchSampler(range(16 ) , batch_size=4 , drop_last=_UpperCamelCase )
        _A = SkipBatchSampler(_UpperCamelCase , 2 )
        self.assertListEqual(list(_UpperCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def UpperCamelCase ( self )-> Union[str, Any]:
        _A = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def UpperCamelCase ( self )-> Optional[Any]:
        _A = DataLoader(list(range(16 ) ) , batch_size=4 )
        _A = skip_first_batches(_UpperCamelCase , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    def UpperCamelCase ( self )-> Optional[Any]:
        # end_of_dataloader must flip to True exactly on the last batch, and
        # reset correctly for a second epoch.
        _A = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(_UpperCamelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(_UpperCamelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
    def UpperCamelCase ( self )-> Tuple:
        Accelerator()
        _A = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(_UpperCamelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(_UpperCamelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 292 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCAmelCase = """Create a default config file for Accelerate with only a few flags set."""
def lowerCamelCase_ ( mixed_precision="no" , save_location=default_json_config_file , use_xpu=False ):
    """Create a default Accelerate config file with only a few flags set.

    Args:
        mixed_precision: one of ``"no"``, ``"fp16"``, ``"bf16"``, ``"fp8"``
            (case-insensitive).
        save_location: where to write the JSON config file; an existing file
            is never overwritten.
        use_xpu: whether to configure for Intel XPUs when available.

    Returns:
        The path the config was written to, or ``False`` if a config already
        existed at ``save_location``.

    NOTE(review): the original body assigned every value to a single
    clobbered name; the config-dict keys below (``num_processes``,
    ``use_cpu``, ``distributed_type``) were reconstructed from the upstream
    Accelerate implementation — confirm before shipping.
    """
    path = Path(save_location )
    path.parent.mkdir(parents=True , exist_ok=True )
    if path.exists():
        print(
            f'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
    config = {
        'compute_environment': 'LOCAL_MACHINE',
        'mixed_precision': mixed_precision,
    }
    # Pick the accelerator backend by probing CUDA, then XPU, then NPU.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config['num_processes'] = num_gpus
        config['use_cpu'] = False
        if num_gpus > 1:
            config['distributed_type'] = 'MULTI_GPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config['num_processes'] = num_xpus
        config['use_cpu'] = False
        if num_xpus > 1:
            config['distributed_type'] = 'MULTI_XPU'
        else:
            config['distributed_type'] = 'NO'
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config['num_processes'] = num_npus
        config['use_cpu'] = False
        if num_npus > 1:
            config['distributed_type'] = 'MULTI_NPU'
        else:
            config['distributed_type'] = 'NO'
    else:
        # CPU-only fallback: a single local process.
        config['use_cpu'] = True
        config['num_processes'] = 1
        config['distributed_type'] = 'NO'
    config = ClusterConfig(**config )
    config.to_json_file(path )
    return path
def lowerCamelCase_ ( subparsers , parents ):
    """Register the ``accelerate config default`` sub-command.

    Args:
        subparsers: the argparse sub-parsers action to register on.
        parents: parent parsers contributing shared arguments.

    Returns:
        The configured sub-parser.
    """
    parser = subparsers.add_parser('default' , parents=parents , help=lowerCAmelCase , formatter_class=SubcommandHelpFormatter )
    parser.add_argument(
        '--config_file' , default=default_json_config_file , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , dest='save_location' , )
    parser.add_argument(
        '--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=str , help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
    # NOTE(review): at call time the module-level name `lowerCamelCase_` resolves
    # to the LAST definition in this file (the command handler below), which is
    # the intended `func`; confirm this obfuscated name collision is acceptable.
    parser.set_defaults(func=lowerCamelCase_ )
    return parser
def lowerCamelCase_ ( __UpperCamelCase ):
    """Handler for ``accelerate config default``: write the config and report
    where it was saved.

    Args:
        __UpperCamelCase: parsed CLI namespace with ``mixed_precision`` and
            ``save_location`` attributes.

    NOTE(review): ``write_basic_config`` is not defined under that name in
    this module (the writer above is also named ``lowerCamelCase_``); confirm
    the intended target of this call.
    """
    config_file = write_basic_config(__UpperCamelCase.mixed_precision , __UpperCamelCase.save_location )
    # The writer returns False when an existing config was left untouched.
    if config_file:
        print(f'accelerate configuration saved at {config_file}' )
| 292 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = CanineTokenizer
lowercase = False
    def _lowerCamelCase ( self : Optional[Any] ):
        '''Test fixture: save a fresh CanineTokenizer into the temp dir.

        NOTE(review): the tokenizer is bound to ``lowerCAmelCase__`` but the
        next line reads ``tokenizer`` — NameError as written; the assignment
        target looks machine-renamed. Confirm against the upstream test.
        '''
        super().setUp()
        lowerCAmelCase__ : Dict = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _lowerCamelCase ( self : Optional[int] ):
        '''Lazily load (once per test class) the pretrained google/canine-s tokenizer.'''
        return CanineTokenizer.from_pretrained('google/canine-s' )
    def _lowerCamelCase ( self : List[str] , **a : List[str] ):
        '''Build a tokenizer from the fixture dir and cap its max length at 1024.

        NOTE(review): both values are bound to ``lowerCAmelCase__`` while the
        return reads ``tokenizer`` — NameError as written; the second
        assignment presumably targeted ``tokenizer.model_max_length``. Confirm.
        '''
        lowerCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
        lowerCAmelCase__ : Optional[Any] = 1_024
        return tokenizer
    @require_torch
    def _lowerCamelCase ( self : str ):
        '''Batch-encode two sentences and check ids, padding and tensor shapes.

        NOTE(review): locals are bound to ``lowerCAmelCase__`` while later
        lines read ``tokenizer``/``batch``, and several assert arguments were
        renamed to the bare ``a`` — NameErrors / vacuous asserts as written.
        '''
        lowerCAmelCase__ : Union[str, Any] = self.canine_tokenizer
        lowerCAmelCase__ : int = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
        # fmt: off
        # Expected ids for the first sentence: CLS (57344), one id per
        # character, SEP (57345), then zero-padding to the batch width.
        lowerCAmelCase__ : Dict = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0]
        # fmt: on
        lowerCAmelCase__ : int = tokenizer(a , padding=a , return_tensors='pt' )
        self.assertIsInstance(a , a )
        lowerCAmelCase__ : int = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(a , a )
        self.assertEqual((2, 39) , batch.input_ids.shape )
        self.assertEqual((2, 39) , batch.attention_mask.shape )
    @require_torch
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Encoding must return input_ids, attention_mask and token_type_ids.

        NOTE(review): same machine-renaming issues as the other tests
        (``lowerCAmelCase__`` targets vs. ``tokenizer`` reads, ``a`` args).
        '''
        lowerCAmelCase__ : List[Any] = self.canine_tokenizer
        lowerCAmelCase__ : Dict = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
        lowerCAmelCase__ : Optional[Any] = tokenizer(a , padding=a , return_tensors='pt' )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('input_ids' , a )
        self.assertIn('attention_mask' , a )
        self.assertIn('token_type_ids' , a )
    @require_torch
    def _lowerCamelCase ( self : List[Any] ):
        '''Target-text encoding with max_length padding must yield width-32 ids.

        NOTE(review): same machine-renaming issues (``lowerCAmelCase__``
        targets vs. ``tokenizer``/``targets`` reads, ``a`` args).
        '''
        lowerCAmelCase__ : str = self.canine_tokenizer
        lowerCAmelCase__ : int = [
            'What\'s the weater?',
            'It\'s about 25 degrees.',
        ]
        lowerCAmelCase__ : Dict = tokenizer(
            text_target=a , max_length=32 , padding='max_length' , truncation=a , return_tensors='pt' )
        self.assertEqual(32 , targets['input_ids'].shape[1] )
    def _lowerCamelCase ( self : List[str] ):
        '''Round-trip save/load: encodings, added special tokens and
        model_max_length must survive ``save_pretrained``/``from_pretrained``.

        NOTE(review): locals are bound to ``lowerCAmelCase__`` while later
        lines read names like ``tokenizer``/``tmpdirname`` (and shutil.rmtree
        is called with the bare ``a``) — NameErrors as written; the targets
        look machine-renamed. Confirm against the upstream test.
        '''
        lowerCAmelCase__ : int = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        lowerCAmelCase__ : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase__ : List[Any] = tempfile.mkdtemp()
                lowerCAmelCase__ : Dict = ' He is very happy, UNwant\u00E9d,running'
                lowerCAmelCase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a )
                tokenizer.save_pretrained(a )
                lowerCAmelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(a )
                lowerCAmelCase__ : Any = after_tokenizer.encode(a , add_special_tokens=a )
                self.assertListEqual(a , a )
                shutil.rmtree(a )
        lowerCAmelCase__ : List[Any] = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase__ : Dict = tempfile.mkdtemp()
                lowerCAmelCase__ : int = ' He is very happy, UNwant\u00E9d,running'
                lowerCAmelCase__ : List[str] = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                lowerCAmelCase__ : str = chr(0Xe_007 )
                additional_special_tokens.append(a )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                lowerCAmelCase__ : int = tokenizer.encode(a , add_special_tokens=a )
                tokenizer.save_pretrained(a )
                lowerCAmelCase__ : str = tokenizer.__class__.from_pretrained(a )
                lowerCAmelCase__ : Optional[Any] = after_tokenizer.encode(a , add_special_tokens=a )
                self.assertListEqual(a , a )
                self.assertIn(a , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                lowerCAmelCase__ : Optional[Any] = tokenizer.__class__.from_pretrained(a , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(a )
def _lowerCamelCase ( self : Any ):
    """Check that a freshly added `cls_token` encodes to one id and round-trips.

    NOTE(review): machine-obfuscated — the bare name ``a`` is undefined, and
    results are bound to throw-away locals while later lines read the names
    the original used (``tokenizers``, ``special_token``, ``ids``,
    ``input_encoded``, ``special_token_id``, ``decoded``).  Restore the
    upstream test before running.
    """
    lowerCAmelCase__ : Any = self.get_tokenizers(do_lower_case=a )
    # One subTest per tokenizer class provided by the fixture.
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
            lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.get_clean_sequence(a )
            # a special token for Canine can be defined as follows:
            lowerCAmelCase__ : Tuple = 0Xe_005
            lowerCAmelCase__ : Tuple = chr(a )
            tokenizer.add_special_tokens({'cls_token': special_token} )
            # The new special token must encode to exactly one id.
            lowerCAmelCase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a )
            self.assertEqual(len(a ) , 1 )
            lowerCAmelCase__ : int = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=a )
            lowerCAmelCase__ : List[str] = tokenizer.encode(a , add_special_tokens=a )
            lowerCAmelCase__ : Any = tokenizer.encode(a , add_special_tokens=a )
            lowerCAmelCase__ : int = tokenizer.encode(a , add_special_tokens=a )
            self.assertEqual(a , input_encoded + special_token_id )
            # Decoding with skip_special_tokens must drop the new token again.
            lowerCAmelCase__ : Optional[Any] = tokenizer.decode(a , skip_special_tokens=a )
            self.assertTrue(special_token not in decoded )
def _lowerCamelCase ( self : Dict ):
    """Tokens added via add_tokens and via add_special_tokens each tokenize to one token.

    NOTE(review): obfuscated — ``a`` is undefined and the two chr() results /
    tokenize() results are bound to throw-away locals while assertions read
    the original names (``SPECIAL_TOKEN_1``, ``SPECIAL_TOKEN_2``, ``token_a``).
    """
    lowerCAmelCase__ : int = self.get_tokenizers(do_lower_case=a )
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
            lowerCAmelCase__ : List[str] = chr(0Xe_005 )
            lowerCAmelCase__ : int = chr(0Xe_006 )
            # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
            tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=a )
            # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
            # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
            tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
            lowerCAmelCase__ : Any = tokenizer.tokenize(a )
            lowerCAmelCase__ : int = tokenizer.tokenize(a )
            self.assertEqual(len(a ) , 1 )
            self.assertEqual(len(a ) , 1 )
            self.assertEqual(token_a[0] , a )
            self.assertEqual(token_a[0] , a )
@require_tokenizers
def _lowerCamelCase ( self : List[Any] ):
    """An AddedToken special token survives save_pretrained/from_pretrained.

    NOTE(review): obfuscated — ``a`` is undefined; ``new_token`` is read but
    the AddedToken result was bound to a throw-away local.
    """
    lowerCAmelCase__ : Union[str, Any] = self.get_tokenizers(do_lower_case=a )
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
            # a special token for Canine can be defined as follows:
            lowerCAmelCase__ : int = 0Xe_006
            lowerCAmelCase__ : Union[str, Any] = chr(a )
            lowerCAmelCase__ : Optional[Any] = AddedToken(a , lstrip=a )
            tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
            # Round-trip through a temp directory; must not raise.
            with tempfile.TemporaryDirectory() as tmp_dir_name:
                tokenizer.save_pretrained(a )
                tokenizer.from_pretrained(a )
def _lowerCamelCase ( self : Any ):
    """Edits made on disk to special_tokens_map.json / tokenizer_config.json are
    honored by from_pretrained, and can be overridden via kwargs.

    NOTE(review): obfuscated — ``a`` is undefined throughout; json.load/dump
    results and the new token chars are bound to throw-away locals while
    later lines read the original names (``new_token_a``,
    ``tokenizer_without_change_in_init``, ``tokenizer``).
    """
    lowerCAmelCase__ : Any = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(a )
            with open(os.path.join(a , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                lowerCAmelCase__ : Optional[int] = json.load(a )
            with open(os.path.join(a , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                lowerCAmelCase__ : str = json.load(a )
            # a special token for Canine can be defined as follows:
            lowerCAmelCase__ : int = 0Xe_006
            lowerCAmelCase__ : int = chr(a )
            lowerCAmelCase__ : Tuple = [new_token_a]
            lowerCAmelCase__ : Optional[Any] = [new_token_a]
            # Rewrite both config files with the new additional special token.
            with open(os.path.join(a , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                json.dump(a , a )
            with open(os.path.join(a , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                json.dump(a , a )
            # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
            # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
            # "special_tokens_map.json" files
            lowerCAmelCase__ : List[Any] = tokenizer_class.from_pretrained(a , extra_ids=0 )
            self.assertIn(a , tokenizer_without_change_in_init.additional_special_tokens )
            # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
            self.assertEqual(
                [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                    tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
            lowerCAmelCase__ : str = 0Xe_007
            lowerCAmelCase__ : Union[str, Any] = chr(a )
            # Now we test that we can change the value of additional_special_tokens in the from_pretrained
            lowerCAmelCase__ : Optional[int] = [AddedToken(a , lstrip=a )]
            lowerCAmelCase__ : Dict = tokenizer_class.from_pretrained(
                a , additional_special_tokens=a , extra_ids=0 )
            self.assertIn(a , tokenizer.additional_special_tokens )
            # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
            self.assertEqual(
                [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowerCamelCase ( self : Dict ):
    """decode(spaces_between_special_tokens=...) produces the expected surface text.

    NOTE(review): obfuscated — ``a`` is undefined; the expected string and
    encoded ids are bound to throw-away locals while the assertion reads
    ``output``.
    """
    lowerCAmelCase__ : str = self.get_tokenizers(do_lower_case=a )
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
            lowerCAmelCase__ : List[Any] = 'hello world'
            if self.space_between_special_tokens:
                lowerCAmelCase__ : Tuple = '[CLS] hello world [SEP]'
            else:
                lowerCAmelCase__ : str = input
            lowerCAmelCase__ : List[str] = tokenizer.encode(a , add_special_tokens=a )
            lowerCAmelCase__ : Optional[Any] = tokenizer.decode(a , spaces_between_special_tokens=self.space_between_special_tokens )
            # Accept either exact or lower-cased output (lower-casing tokenizers).
            self.assertIn(a , [output, output.lower()] )
def _lowerCamelCase ( self : str ):
    """Setting each `<attr>_id` property keeps the paired `<attr>` token view in sync.

    NOTE(review): obfuscated — setattr/getattr targets and values are the
    undefined placeholder ``a``; ``additional_special_token`` /
    ``additional_special_token_id`` are read but their bindings went to
    throw-away locals.
    """
    lowerCAmelCase__ : Optional[Any] = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
            # All standard special-token attribute names exposed by tokenizers.
            lowerCAmelCase__ : Optional[Any] = [
                'bos_token',
                'eos_token',
                'unk_token',
                'sep_token',
                'pad_token',
                'cls_token',
                'mask_token',
            ]
            lowerCAmelCase__ : Union[str, Any] = 'a'
            lowerCAmelCase__ : Optional[int] = ord(a )
            for attr in attributes_list:
                setattr(a , attr + '_id' , a )
                self.assertEqual(getattr(a , a ) , a )
                self.assertEqual(getattr(a , attr + '_id' ) , a )
                setattr(a , attr + '_id' , a )
                self.assertEqual(getattr(a , a ) , a )
                self.assertEqual(getattr(a , attr + '_id' ) , a )
            # Clearing the ids list must clear the token list, and vice versa.
            setattr(a , 'additional_special_tokens_ids' , [] )
            self.assertListEqual(getattr(a , 'additional_special_tokens' ) , [] )
            self.assertListEqual(getattr(a , 'additional_special_tokens_ids' ) , [] )
            lowerCAmelCase__ : Optional[int] = 0Xe_006
            lowerCAmelCase__ : Union[str, Any] = chr(a )
            setattr(a , 'additional_special_tokens_ids' , [additional_special_token_id] )
            self.assertListEqual(getattr(a , 'additional_special_tokens' ) , [additional_special_token] )
            self.assertListEqual(getattr(a , 'additional_special_tokens_ids' ) , [additional_special_token_id] )
# NOTE(review): eight deliberate no-op overrides — base-class tests that do
# not apply to this tokenizer are disabled by overriding them with `pass`.
# Obfuscation collapsed all method names to `_lowerCamelCase`, so in this
# form each def simply shadows the previous one.
def _lowerCamelCase ( self : Union[str, Any] ):
    """Intentionally skipped for this tokenizer."""
    pass
def _lowerCamelCase ( self : List[Any] ):
    """Intentionally skipped for this tokenizer."""
    pass
def _lowerCamelCase ( self : Optional[Any] ):
    """Intentionally skipped for this tokenizer."""
    pass
def _lowerCamelCase ( self : Optional[int] ):
    """Intentionally skipped for this tokenizer."""
    pass
def _lowerCamelCase ( self : Union[str, Any] ):
    """Intentionally skipped for this tokenizer."""
    pass
def _lowerCamelCase ( self : Tuple ):
    """Intentionally skipped for this tokenizer."""
    pass
def _lowerCamelCase ( self : Union[str, Any] ):
    """Intentionally skipped for this tokenizer."""
    pass
def _lowerCamelCase ( self : List[str] ):
    """Intentionally skipped for this tokenizer."""
    pass
import unittest
from transformers import DonutProcessor
# Checkpoint used by the processor tests below.
lowerCamelCase__ = """naver-clova-ix/donut-base"""

class A__ ( unittest.TestCase ):
    """Tests for the Donut processor's tag-sequence -> dict conversion.

    NOTE(review): method names were obfuscated to ``_lowerCamelCase`` (so the
    second def shadows the first and unittest will not auto-run either);
    restore ``setUp`` / ``test_token2json`` names upstream.
    """

    def _lowerCamelCase(self):
        """Load the pretrained Donut processor once per test (setUp role).

        BUG fix: the original bound the processor to a throw-away local and
        passed the undefined name ``a`` to ``from_pretrained``; store it on
        ``self`` (the other test reads ``self.processor``) and use the
        module-level checkpoint constant.
        """
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase__)

    def _lowerCamelCase(self):
        """A tagged token sequence parses back into the expected nested dict."""
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        # NOTE(review): ``tokenajson`` mirrors the obfuscated source; the real
        # DonutProcessor API calls this ``token2json`` — confirm before use.
        actual_json = self.processor.tokenajson(sequence)
        # BUG fix: the original compared two undefined names ``a``.
        self.assertDictEqual(actual_json, expected_json)
import pytest
import datasets
# Import fixture modules as plugins
# Fixture modules auto-loaded by pytest for the whole suite.
# NOTE(review): obfuscated name — pytest only honors this list when it is
# bound to the magic name `pytest_plugins`.
UpperCamelCase = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def A(lowercase__, items) -> None:
    """pytest collection hook: default-mark unmarked tests as "unit".

    First argument is the pytest config (unused here), second the list of
    collected test items.

    BUG fixes vs the obfuscated original: the def repeated the parameter name
    ``lowercase__`` (a SyntaxError), iterated the undefined name ``items``,
    and used annotations (``Optional``) that are not imported in this module.
    """
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def A(lowercase__) -> None:
    """pytest_configure hook: register the custom ``torchaudio_latest`` marker.

    BUG fixes vs the obfuscated original: the body called
    ``config.addinivalue_line`` although the parameter is named
    ``lowercase__`` (``config`` was an undefined name), and the annotations
    used ``Optional``/``Any`` which are not imported in this module.
    """
    lowercase__.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=lowercase__ )
# NOTE(review): obfuscation artifacts — `lowercase__` in the decorator is an
# undefined module-level name (originally presumably `True`); the def below
# repeats the parameter name `lowercase__`, which is a SyntaxError; and the
# body reads `tmp_path_factory` / `monkeypatch` / `test_hf_cache_home` /
# `test_hf_datasets_cache` — the names the original parameters and locals
# carried.  Intent: redirect every HF cache location into the pytest base
# temp dir so tests never touch the user's real caches.
def A ( lowercase__ : str , lowercase__ : int ) -> int:
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
    UpperCamelCase__ :Union[str, Any] = tmp_path_factory.getbasetemp() / """cache"""
    UpperCamelCase__ :Union[str, Any] = test_hf_cache_home / """datasets"""
    UpperCamelCase__ :Optional[int] = test_hf_cache_home / """metrics"""
    UpperCamelCase__ :Tuple = test_hf_cache_home / """modules"""
    monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(lowercase__ ) )
    monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(lowercase__ ) )
    monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(lowercase__ ) )
    UpperCamelCase__ :Union[str, Any] = test_hf_datasets_cache / """downloads"""
    monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(lowercase__ ) )
    UpperCamelCase__ :Optional[Any] = test_hf_datasets_cache / """downloads""" / """extracted"""
    monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowercase__ ) )
@pytest.fixture(autouse=lowercase__ , scope="""session""" )
# NOTE(review): `lowercase__` in the decorator is an undefined module-level
# name (the original presumably passed autouse=True).
def A ( ) -> Optional[Any]:
    # Session-wide: silence datasets' progress bars so test output stays clean.
    datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase__ )
def A ( lowercase__ : Tuple ) -> int:
    # don't take tests into account when counting downloads
    # NOTE(review): obfuscated — `monkeypatch` is undefined (it was the
    # original parameter name) and `lowercase__` stands in for the original
    # constant value (presumably False).  Restore before use.
    monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , lowercase__ )
@pytest.fixture
def A ( lowercase__ : Optional[int] ) -> Dict:
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    # NOTE(review): obfuscated — `monkeypatch` is undefined (original
    # parameter name) and `lowercase__` stands in for the original constant
    # value (presumably True).
    monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , lowercase__ )
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
SCREAMING_SNAKE_CASE : Dict = True
except ImportError:
SCREAMING_SNAKE_CASE : Optional[Any] = False
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase(lowerCAmelCase__: Namespace):
    """Factory wired into the CLI sub-parser: build the add-new-model command
    from the parsed argparse Namespace.

    BUG fixes vs the obfuscated original: attributes were read off the
    undefined name ``args`` instead of the parameter, and the return
    annotation used ``Optional``/``Any`` which are not imported here.
    """
    # NOTE(review): ``AddNewModelCommand`` is not defined in this obfuscated
    # file — the command class below was renamed to ``snake_case``.
    return AddNewModelCommand(lowerCAmelCase__.testing, lowerCAmelCase__.testing_file, path=lowerCAmelCase__.path)
class snake_case ( lowercase_ ):
    """CLI command implementing `transformers-cli add-new-model` (deprecated):
    runs the `adding_a_new_model` cookiecutter template and moves the
    generated files into the transformers source tree.

    NOTE(review): obfuscated — the base class `lowercase_` is not defined in
    this module (originally `BaseTransformersCLICommand`), `__init__` repeats
    the parameter name `_lowercase` (a SyntaxError), and throughout the body
    results are bound to the throw-away local `SCREAMING_SNAKE_CASE_` while
    later lines read the names the original used (`add_new_model_parser`,
    `path_to_transformer_root`, `configuration`, `model_dir`, ...).
    """
    @staticmethod
    def a__ ( _lowercase ) -> Tuple:
        # Register the `add-new-model` sub-parser and its flags; `func` routes
        # parsed args back to the factory above.
        # NOTE(review): reads undefined `parser`; presumably the parameter `_lowercase`.
        SCREAMING_SNAKE_CASE_ = parser.add_parser('add-new-model' )
        add_new_model_parser.add_argument('--testing', action='store_true', help='If in testing mode.' )
        add_new_model_parser.add_argument('--testing_file', type=_lowercase, help='Configuration file on which to run.' )
        add_new_model_parser.add_argument(
            '--path', type=_lowercase, help='Path to cookiecutter. Should only be used for testing purposes.' )
        add_new_model_parser.set_defaults(func=_lowercase )

    def __init__( self, _lowercase, _lowercase, _lowercase=None, *_lowercase ) -> Optional[Any]:
        # NOTE(review): duplicate parameter names (SyntaxError); the body reads
        # undefined `testing`/`testing_file`/`path` and drops the results —
        # originally these set self._testing / self._testing_file / self._path.
        SCREAMING_SNAKE_CASE_ = testing
        SCREAMING_SNAKE_CASE_ = testing_file
        SCREAMING_SNAKE_CASE_ = path

    def a__ ( self ) -> Optional[int]:
        """Run the command: generate the template, then distribute its files."""
        warnings.warn(
            'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
            'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
            'checks, you should use `transformers-cli add-new-model-like` instead.' )
        if not _has_cookiecutter:
            raise ImportError(
                'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
                'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        SCREAMING_SNAKE_CASE_ = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(_lowercase ) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.' )
        SCREAMING_SNAKE_CASE_ = (
            Path(_lowercase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
        )
        SCREAMING_SNAKE_CASE_ = path_to_transformer_root / 'templates' / 'adding_a_new_model'
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(_lowercase ) )
        else:
            # Testing mode: feed a pre-canned configuration, no interactive input.
            with open(self._testing_file, 'r' ) as configuration_file:
                SCREAMING_SNAKE_CASE_ = json.load(_lowercase )
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path ), no_input=_lowercase, extra_context=_lowercase, )
        SCREAMING_SNAKE_CASE_ = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
        # Retrieve configuration
        with open(directory + '/configuration.json', 'r' ) as configuration_file:
            SCREAMING_SNAKE_CASE_ = json.load(_lowercase )
        SCREAMING_SNAKE_CASE_ = configuration['lowercase_modelname']
        SCREAMING_SNAKE_CASE_ = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f"""{directory}/configuration.json""" )
        # Which frameworks were requested in the cookiecutter answers.
        SCREAMING_SNAKE_CASE_ = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        SCREAMING_SNAKE_CASE_ = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        SCREAMING_SNAKE_CASE_ = 'Flax' in generate_tensorflow_pytorch_and_flax
        SCREAMING_SNAKE_CASE_ = f"""{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"""
        os.makedirs(_lowercase, exist_ok=_lowercase )
        os.makedirs(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}""", exist_ok=_lowercase )
        # Tests require submodules as they have parent imports
        with open(f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py""", 'w' ):
            pass
        shutil.move(
            f"""{directory}/__init__.py""", f"""{model_dir}/__init__.py""", )
        shutil.move(
            f"""{directory}/configuration_{lowercase_model_name}.py""", f"""{model_dir}/configuration_{lowercase_model_name}.py""", )

        def remove_copy_lines(_lowercase ):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(_lowercase, 'r' ) as f:
                SCREAMING_SNAKE_CASE_ = f.readlines()
            with open(_lowercase, 'w' ) as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(_lowercase )

        # Per framework: either move the generated modeling + test files into
        # the tree, or delete them when that framework was not requested.
        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_{lowercase_model_name}.py""" )
                shutil.move(
                    f"""{directory}/modeling_{lowercase_model_name}.py""", f"""{model_dir}/modeling_{lowercase_model_name}.py""", )
                shutil.move(
                    f"""{directory}/test_modeling_{lowercase_model_name}.py""", f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py""", )
            else:
                os.remove(f"""{directory}/modeling_{lowercase_model_name}.py""" )
                os.remove(f"""{directory}/test_modeling_{lowercase_model_name}.py""" )
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
                shutil.move(
                    f"""{directory}/modeling_tf_{lowercase_model_name}.py""", f"""{model_dir}/modeling_tf_{lowercase_model_name}.py""", )
                shutil.move(
                    f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""", f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py""", )
            else:
                os.remove(f"""{directory}/modeling_tf_{lowercase_model_name}.py""" )
                os.remove(f"""{directory}/test_modeling_tf_{lowercase_model_name}.py""" )
        if output_flax:
            if not self._testing:
                remove_copy_lines(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
                shutil.move(
                    f"""{directory}/modeling_flax_{lowercase_model_name}.py""", f"""{model_dir}/modeling_flax_{lowercase_model_name}.py""", )
                shutil.move(
                    f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""", f"""{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py""", )
            else:
                os.remove(f"""{directory}/modeling_flax_{lowercase_model_name}.py""" )
                os.remove(f"""{directory}/test_modeling_flax_{lowercase_model_name}.py""" )
        shutil.move(
            f"""{directory}/{lowercase_model_name}.md""", f"""{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md""", )
        shutil.move(
            f"""{directory}/tokenization_{lowercase_model_name}.py""", f"""{model_dir}/tokenization_{lowercase_model_name}.py""", )
        shutil.move(
            f"""{directory}/tokenization_fast_{lowercase_model_name}.py""", f"""{model_dir}/tokenization_{lowercase_model_name}_fast.py""", )
        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(_lowercase, _lowercase, _lowercase ):
            # Insert `lines_to_copy` below the marker line in a file, atomically
            # via a temp file.  NOTE(review): duplicate parameter names
            # (SyntaxError); body reads undefined names from the original
            # signature (original_file, line_to_copy_below, lines_to_copy).
            # Create temp file
            SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = mkstemp()
            SCREAMING_SNAKE_CASE_ = False
            with fdopen(_lowercase, 'w' ) as new_file:
                with open(_lowercase ) as old_file:
                    for line in old_file:
                        new_file.write(_lowercase )
                        if line_to_copy_below in line:
                            SCREAMING_SNAKE_CASE_ = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(_lowercase )
            if not line_found:
                raise ValueError(f"""Line {line_to_copy_below} was not found in file.""" )
            # Copy the file permissions from the old file to the new file
            copymode(_lowercase, _lowercase )
            # Remove original file
            remove(_lowercase )
            # Move new file
            move(_lowercase, _lowercase )

        def skip_units(_lowercase ):
            # True when the marker line targets a framework that was not
            # requested.  NOTE(review): reads `line`, not its parameter.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(_lowercase ):
            # Parse the generated to_replace_*.py script: collect snippets
            # between "# Below:" and "# End." markers and splice each into its
            # target file, then delete the script.
            with open(_lowercase ) as datafile:
                SCREAMING_SNAKE_CASE_ = []
                SCREAMING_SNAKE_CASE_ = False
                SCREAMING_SNAKE_CASE_ = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        SCREAMING_SNAKE_CASE_ = line.split('"' )[1]
                        SCREAMING_SNAKE_CASE_ = skip_units(_lowercase )
                    elif "# Below: " in line and "##" not in line:
                        SCREAMING_SNAKE_CASE_ = line.split('"' )[1]
                        SCREAMING_SNAKE_CASE_ = skip_units(_lowercase )
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(_lowercase, _lowercase, _lowercase )
                        SCREAMING_SNAKE_CASE_ = []
                    elif "# Replace with" in line and "##" not in line:
                        SCREAMING_SNAKE_CASE_ = []
                    elif "##" not in line:
                        lines_to_copy.append(_lowercase )
            remove(_lowercase )

        replace_in_files(f"""{directory}/to_replace_{lowercase_model_name}.py""" )
        os.rmdir(_lowercase )
| 294 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowercase__( snake_case__ ):
    """Value-guided planning pipeline: samples trajectories with a diffusion
    UNet, nudging each denoising step by the gradient of a learned value
    function, then returns the first action of the best-scoring trajectory.

    NOTE(review): obfuscated — the base class `snake_case__` is undefined in
    this module; several defs repeat the parameter name `__SCREAMING_SNAKE_CASE`
    (a SyntaxError, and double-underscore names are additionally subject to
    class-body name mangling); and results are bound to the throw-away local
    `UpperCamelCase__` while later lines read the original names
    (`self.unet`, `self.data`, `batch_size`, `grad`, ...).
    """
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> Tuple:
        """Capture model/env handles and per-key normalization stats.

        NOTE(review): originally set self.value_function / self.unet /
        self.scheduler / self.env / self.data / self.means / self.stds /
        self.state_dim / self.action_dim.
        """
        super().__init__()
        UpperCamelCase__ : Dict =value_function
        UpperCamelCase__ : Union[str, Any] =unet
        UpperCamelCase__ : Optional[int] =scheduler
        UpperCamelCase__ : Optional[Any] =env
        UpperCamelCase__ : str =env.get_dataset()
        UpperCamelCase__ : Tuple ={}
        # Per-key mean of the offline dataset (keys without .mean() are skipped).
        for key in self.data.keys():
            try:
                UpperCamelCase__ : Optional[int] =self.data[key].mean()
            except: # noqa: E722
                pass
        UpperCamelCase__ : int ={}
        # Per-key std of the offline dataset.
        for key in self.data.keys():
            try:
                UpperCamelCase__ : Any =self.data[key].std()
            except: # noqa: E722
                pass
        UpperCamelCase__ : Optional[Any] =env.observation_space.shape[0]
        UpperCamelCase__ : Tuple =env.action_space.shape[0]

    def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Tuple:
        """Standardize x_in using the stored per-key mean/std."""
        return (x_in - self.means[key]) / self.stds[key]

    def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> List[Any]:
        """Invert the standardization for a given key."""
        return x_in * self.stds[key] + self.means[key]

    def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE) -> Tuple:
        """Recursively move dicts/tensors/arrays onto the UNet's device."""
        if type(__SCREAMING_SNAKE_CASE) is dict:
            return {k: self.to_torch(__SCREAMING_SNAKE_CASE) for k, v in x_in.items()}
        elif torch.is_tensor(__SCREAMING_SNAKE_CASE):
            return x_in.to(self.unet.device)
        return torch.tensor(__SCREAMING_SNAKE_CASE , device=self.unet.device)

    def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Tuple:
        """Overwrite conditioned timesteps of the trajectory with their fixed values."""
        for key, val in cond.items():
            UpperCamelCase__ : List[Any] =val.clone()
        return x_in

    def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Any:
        """Reverse-diffuse trajectories, applying value-gradient guidance each step."""
        UpperCamelCase__ : Tuple =x.shape[0]
        UpperCamelCase__ : Tuple =None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            UpperCamelCase__ : Any =torch.full((batch_size,) , __SCREAMING_SNAKE_CASE , device=self.unet.device , dtype=torch.long)
            for _ in range(__SCREAMING_SNAKE_CASE):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    UpperCamelCase__ : int =self.value_function(x.permute(0 , 2 , 1) , __SCREAMING_SNAKE_CASE).sample
                    UpperCamelCase__ : Optional[Any] =torch.autograd.grad([y.sum()] , [x])[0]
                    UpperCamelCase__ : Optional[int] =self.scheduler._get_variance(__SCREAMING_SNAKE_CASE)
                    UpperCamelCase__ : int =torch.exp(0.5 * posterior_variance)
                    # Scale the gradient by the model's posterior std.
                    UpperCamelCase__ : int =model_std * grad
                UpperCamelCase__ : Union[str, Any] =0
                UpperCamelCase__ : List[Any] =x.detach()
                UpperCamelCase__ : Any =x + scale * grad
                UpperCamelCase__ : Any =self.reset_xa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.action_dim)
            UpperCamelCase__ : Tuple =self.unet(x.permute(0 , 2 , 1) , __SCREAMING_SNAKE_CASE).sample.permute(0 , 2 , 1)
            # TODO: verify deprecation of this kwarg
            UpperCamelCase__ : List[Any] =self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , predict_epsilon=__SCREAMING_SNAKE_CASE)["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            UpperCamelCase__ : Tuple =self.reset_xa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.action_dim)
            UpperCamelCase__ : Union[str, Any] =self.to_torch(__SCREAMING_SNAKE_CASE)
        return x, y

    def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.1) -> List[str]:
        """Plan from an observation: sample a batch of trajectories and return
        the de-normalized first action of the highest-value one."""
        UpperCamelCase__ : Union[str, Any] =self.normalize(__SCREAMING_SNAKE_CASE , "observations")
        UpperCamelCase__ : Optional[int] =obs[None].repeat(__SCREAMING_SNAKE_CASE , axis=0)
        UpperCamelCase__ : int ={0: self.to_torch(__SCREAMING_SNAKE_CASE)}
        UpperCamelCase__ : Optional[int] =(batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        UpperCamelCase__ : Optional[int] =randn_tensor(__SCREAMING_SNAKE_CASE , device=self.unet.device)
        UpperCamelCase__ : Dict =self.reset_xa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.action_dim)
        UpperCamelCase__ : List[Any] =self.to_torch(__SCREAMING_SNAKE_CASE)
        # run the diffusion process
        UpperCamelCase__ , UpperCamelCase__ : Dict =self.run_diffusion(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        # sort output trajectories by value
        UpperCamelCase__ : Dict =y.argsort(0 , descending=__SCREAMING_SNAKE_CASE).squeeze()
        UpperCamelCase__ : Dict =x[sorted_idx]
        UpperCamelCase__ : Dict =sorted_values[:, :, : self.action_dim]
        UpperCamelCase__ : Dict =actions.detach().cpu().numpy()
        UpperCamelCase__ : Dict =self.de_normalize(__SCREAMING_SNAKE_CASE , key="actions")
        # select the action with the highest value
        if y is not None:
            UpperCamelCase__ : Union[str, Any] =0
        else:
            # if we didn't run value guiding, select a random action
            UpperCamelCase__ : Union[str, Any] =np.random.randint(0 , __SCREAMING_SNAKE_CASE)
        UpperCamelCase__ : int =denorm_actions[selected_index, 0]
        return denorm_actions
| 582 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _lowerCamelCase(A_):
    """Pair up leftover CLI args ``["--key", "value", ...]`` into ``{key: value}``.

    BUG fixes vs the obfuscated original: the comprehension zipped the
    undefined name ``unknown_args`` instead of the parameter ``A_``, and the
    ``-> Union[str, Any]`` annotation used a name not imported in this module.
    Even-indexed entries are treated as ``--key`` flags (leading dashes
    stripped), odd-indexed entries as their values.
    """
    return {key.lstrip("-"): value for key, value in zip(A_[::2], A_[1::2])}
def _lowerCamelCase ( ) -> Union[str, Any]:
    '''Entry point of the `datasets-cli` tool: build the parser, register each
    sub-command, parse argv, and run the selected command.

    NOTE(review): obfuscated — the bare name `A_` is undefined and stands in
    for several different original values (False, the sub-parser, the parsed
    args, ...); results are bound to throw-away locals while later lines read
    the original names (`parser`, `args`, `service`); `parse_unknown_args`
    and `Union` are likewise undefined here.
    '''
    UpperCamelCase__ : Tuple =ArgumentParser(
        "HuggingFace Datasets CLI tool" , usage="datasets-cli <command> [<args>]" , allow_abbrev=A_ )
    UpperCamelCase__ : Tuple =parser.add_subparsers(help="datasets-cli command helpers" )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(A_ )
    EnvironmentCommand.register_subcommand(A_ )
    TestCommand.register_subcommand(A_ )
    RunBeamCommand.register_subcommand(A_ )
    DummyDataCommand.register_subcommand(A_ )
    # Parse args
    UpperCamelCase__ , UpperCamelCase__ : List[Any] =parser.parse_known_args()
    if not hasattr(A_ , "func" ):
        # No sub-command given: print usage and exit with an error status.
        parser.print_help()
        exit(1 )
    UpperCamelCase__ : Union[str, Any] =parse_unknown_args(A_ )
    # Run
    UpperCamelCase__ : Tuple =args.func(A_ , **A_ )
    service.run()


if __name__ == "__main__":
    # NOTE(review): `main` is undefined here — both functions above were
    # renamed to `_lowerCamelCase` by the obfuscation.
    main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffolding for the BLIP sub-package: map submodules to the
# names they export, gated on optional backends (vision / torch / tf).
# NOTE(review): obfuscation renamed the import-structure dict to
# `UpperCAmelCase`, and every subsequent assignment (the optional export
# lists) rebinds that same name instead of extending the dict; the
# `_import_structure` read at the bottom is therefore undefined.  In the
# original, the first dict is `_import_structure` and each `else:` branch
# appends to a key of it.
UpperCAmelCase = {
    """configuration_blip""": [
        """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """BlipConfig""",
        """BlipTextConfig""",
        """BlipVisionConfig""",
    ],
    """processing_blip""": ["""BlipProcessor"""],
}

# Image processor is only exported when the vision backend is available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase = ["""BlipImageProcessor"""]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase = [
        """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BlipModel""",
        """BlipPreTrainedModel""",
        """BlipForConditionalGeneration""",
        """BlipForQuestionAnswering""",
        """BlipVisionModel""",
        """BlipTextModel""",
        """BlipForImageTextRetrieval""",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase = [
        """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFBlipModel""",
        """TFBlipPreTrainedModel""",
        """TFBlipForConditionalGeneration""",
        """TFBlipForQuestionAnswering""",
        """TFBlipVisionModel""",
        """TFBlipTextModel""",
        """TFBlipForImageTextRetrieval""",
    ]

# Under static type checking, perform the real imports so checkers see them;
# at runtime, install a _LazyModule that imports on first attribute access.
if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 535 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __magic_name__(_lowerCamelCase):
    """Decorator: wrap *_lowerCamelCase* so calling it returns the elapsed wall
    time (seconds, via ``timeit.default_timer``) instead of its result.

    BUG fixes vs the obfuscated original: the inner wrapper declared
    ``*_lowerCamelCase, **_lowerCamelCase`` (duplicate parameter name — a
    SyntaxError), the body called the undefined name ``func`` instead of the
    decorated callable, and ``wrapper.__name__`` was assigned to a throw-away
    local instead of being set on the wrapper.
    """
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        _lowerCamelCase(*args, **kwargs)  # result is deliberately discarded
        return timeit.default_timer() - start

    # Preserve the wrapped function's name for reporting.
    wrapper.__name__ = _lowerCamelCase.__name__
    return wrapper
def __magic_name__(_lowerCamelCase: dict, num_examples: int = 100, seq_shapes=None) -> list:
    """Build ``num_examples`` synthetic ``(index, example_dict)`` rows matching
    the datasets feature mapping *_lowerCamelCase*.

    BUG fixes vs the obfuscated original: the def repeated the parameter name
    ``_lowerCamelCase`` three times (a SyntaxError), and the body read the
    undefined names ``seq_shapes``/``num_examples``/``features``/``dummy_data``.
    The first parameter keeps its original (obfuscated) name so positional and
    keyword callers keep working.  NOTE(review): this module defines several
    functions all named ``__magic_name__``; restore the upstream names.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(_lowerCamelCase.items()):
            if isinstance(v, _ArrayXD):
                # Fixed-shape array feature: random floats cast to its dtype.
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequence features down to the scalar feature,
                # then use the caller-provided shape for this column.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data
def __magic_name__ ( _lowerCamelCase: Tuple, _lowerCamelCase: Tuple, _lowerCamelCase: Union[str, Any]=100, _lowerCamelCase: List[Any]=None ) -> List[Any]:
    '''Write synthetic rows to an Arrow file and load them back as a Dataset.

    NOTE(review): obfuscated beyond direct repair — the def repeats the
    parameter name `_lowerCamelCase` four times (a SyntaxError); the body
    reads the undefined names `generate_examples`, `features`, `num_examples`,
    `seq_shapes`, `dummy_data`, `num_final_examples`, `filename`; and no
    `return` survives even though a `dataset` binding is the last statement.
    Restore the upstream signature (path, features, num_examples, seq_shapes).
    '''
    lowerCAmelCase = generate_examples(_lowerCamelCase, num_examples=_lowerCamelCase, seq_shapes=_lowerCamelCase )
    with ArrowWriter(features=_lowerCamelCase, path=_lowerCamelCase ) as writer:
        for key, record in dummy_data:
            lowerCAmelCase = features.encode_example(_lowerCamelCase )
            writer.write(_lowerCamelCase )
    lowerCAmelCase , lowerCAmelCase = writer.finalize()
    # Sanity check: everything generated must have been written.
    if not num_final_examples == num_examples:
        raise ValueError(
            f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
    lowerCAmelCase = datasets.Dataset.from_file(filename=_lowerCamelCase, info=datasets.DatasetInfo(features=_lowerCamelCase ) )
| 535 | 1 |
'''simple docstring'''
from collections.abc import Callable
class lowerCamelCase_ :
def __init__(self, lowerCAmelCase__: Callable | None = None):
    """Heap keyed by ``key(item)`` with an item->index map for O(1) lookup.

    BUG fixes vs the obfuscated original: every attribute was bound to the
    throw-away local ``SCREAMING_SNAKE_CASE`` (so ``self.arr`` /
    ``self.pos_map`` / ``self.size`` / ``self.key``, read by the other
    methods, were never set), and the default key function
    (``lambda lowerCAmelCase__: x``) returned an undefined name.
    """
    # Stores actual heap items.
    self.arr: list = []
    # Stores indexes of each item for supporting updates and deletion.
    self.pos_map: dict = {}
    # Stores current size of heap.
    self.size = 0
    # Stores function used to evaluate the score of an item on which basis
    # ordering will be done; identity by default.
    self.key: Callable = lowerCAmelCase__ or (lambda item: item)
def __lowercase(self, lowerCAmelCase__: int):
    """Return the parent index of node *lowerCAmelCase__*, or None for the root.

    BUG fix: the original read the undefined name ``i`` instead of its
    parameter.
    """
    return int((lowerCAmelCase__ - 1) / 2) if lowerCAmelCase__ > 0 else None
def __lowercase(self, lowerCAmelCase__: int):
    """Return the left-child index of node *lowerCAmelCase__* if it lies inside
    the heap, else None.

    BUG fixes: the original computed the index into a throw-away local and
    read the undefined names ``i`` and ``left``.
    """
    left = int(2 * lowerCAmelCase__ + 1)
    return left if 0 < left < self.size else None
def __lowercase ( self : Any , lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = int(2 * i + 2 )
return right if 0 < right < self.size else None
def __lowercase ( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
SCREAMING_SNAKE_CASE : List[str] = self.arr[j], self.arr[i]
def __lowercase ( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
"""simple docstring"""
return self.arr[i][1] < self.arr[j][1]
def __lowercase ( self : Dict , lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self._left(__a )
SCREAMING_SNAKE_CASE : Tuple = self._right(__a )
SCREAMING_SNAKE_CASE : str = i
if left is not None and not self._cmp(__a , __a ):
SCREAMING_SNAKE_CASE : Any = left
if right is not None and not self._cmp(__a , __a ):
SCREAMING_SNAKE_CASE : Union[str, Any] = right
return valid_parent
def __lowercase ( self : Tuple , lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self._parent(__a )
while parent is not None and not self._cmp(__a , __a ):
self._swap(__a , __a )
SCREAMING_SNAKE_CASE : Union[str, Any] = parent, self._parent(__a )
def __lowercase ( self : Tuple , lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self._get_valid_parent(__a )
while valid_parent != index:
self._swap(__a , __a )
SCREAMING_SNAKE_CASE : List[str] = valid_parent, self._get_valid_parent(__a )
def __lowercase ( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
"""simple docstring"""
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE : Optional[Any] = self.pos_map[item]
SCREAMING_SNAKE_CASE : int = [item, self.key(__a )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(__a )
self._heapify_down(__a )
def __lowercase ( self : Dict , lowerCAmelCase__ : int ):
"""simple docstring"""
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE : Optional[Any] = self.pos_map[item]
del self.pos_map[item]
SCREAMING_SNAKE_CASE : Optional[Any] = self.arr[self.size - 1]
SCREAMING_SNAKE_CASE : List[str] = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(__a )
self._heapify_down(__a )
def __lowercase ( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(__a )] )
else:
SCREAMING_SNAKE_CASE : Any = [item, self.key(__a )]
SCREAMING_SNAKE_CASE : Tuple = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
return self.arr[0] if self.size else None
def __lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def UpperCAmelCase():
    """No-op placeholder; kept so the module's doctest entry point exists."""
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| 720 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict ( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ):
    """Load a PyTorch checkpoint and convert it into a Flax state dict.

    Args:
        flax_model: the Flax model whose parameter structure guides conversion.
        pytorch_checkpoint_path: path to a single ``.pt`` file, or the list of
            shard files when ``is_sharded`` is True.
        is_sharded: whether the checkpoint is split across multiple shard files.
        allow_missing_keys: accepted for API compatibility; not used here.

    Raises:
        ImportError: if PyTorch is not installed.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F"""Loading PyTorch weights from {pt_path}""" )

        pt_state_dict = torch.load(pt_path , map_location='''cpu''' )
        logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor ( pt_tuple_key : Tuple[str] , pt_tensor : np.ndarray , random_flax_state_dict : Dict[str, jnp.ndarray] , model_prefix : str , ):
    """Rename a PyTorch weight key to its Flax counterpart, reshaping when needed.

    Checks each known PT->Flax naming pattern in turn (layer norm, batch norm
    statistics, embeddings, conv/linear kernels, legacy gamma/beta, and
    ``weight_norm`` parametrizations) and returns the first match as a
    ``(renamed_key_tuple, possibly_reshaped_tensor)`` pair. Falls through to
    the unchanged key when nothing matches.
    """

    def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
        """True when `key` or `(model_prefix,) + key` exists in the flax state dict."""
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''mean''',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''var''',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key ):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT stores (out, in, kh, kw); Flax expects (kh, kw, in, out).
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: Flax kernels are the transpose of PT weights.
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key ):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '''_g'''
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '''_v'''
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax ( pt_state_dict , flax_model ):
    """Convert a (non-sharded) PyTorch state dict to a nested Flax params dict.

    Renames/reshapes every weight via `rename_key_and_reshape_tensor`, handles
    base-model-prefix mismatches in both directions, routes batch-norm running
    statistics into ``batch_stats``, and validates shapes against the model's
    randomly initialized parameters.
    """
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params['''params''']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params )

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['''batch_stats'''] )
        random_flax_state_dict.update(flax_batch_stats )

    flax_state_dict = {}

    # A head-ful checkpoint loaded into a bare base model, or vice versa,
    # requires stripping/adding the base-model prefix on every key.
    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('''.''' ) )

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key , None )
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[('''params''',) + flax_key] = jnp.asarray(flax_tensor )

        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor )

    return unflatten_dict(flax_state_dict )
def convert_pytorch_sharded_state_dict_to_flax ( shard_filenames , flax_model ):
    """Convert a sharded PyTorch checkpoint (list of ``.pt`` files) to a Flax params dict.

    Same conversion logic as `convert_pytorch_state_dict_to_flax`, applied
    shard by shard and accumulated into a single flattened dict before
    unflattening.
    """
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file )
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params['''params''']

            random_flax_state_dict = flatten_dict(flax_model_params )
            random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params )

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split('''.''' ) )

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                        F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor )
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key , None )
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[('''params''',) + flax_key] = jnp.asarray(flax_tensor )
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
def load_flax_checkpoint_in_pytorch_model ( model , flax_checkpoint_path ):
    """Load a serialized (msgpack) Flax checkpoint file into a PyTorch model.

    Looks up the Flax counterpart of `model`'s class in the `transformers`
    module, deserializes the checkpoint against it, and delegates the weight
    transfer to `load_flax_weights_in_pytorch_model`.

    Raises:
        EnvironmentError: if the file cannot be deserialized as a Flax object.
    """
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path )
    logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" )

    # import correct flax class
    flax_cls = getattr(transformers , '''Flax''' + model.__class__.__name__ )

    # load flax weight dict
    with open(flax_checkpoint_path , '''rb''' ) as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )

    return load_flax_weights_in_pytorch_model(model , flax_state_dict )
def load_flax_weights_in_pytorch_model ( pt_model , flax_state ):
    """Load a (possibly nested) Flax state dict into a PyTorch model, in place.

    Casts bf16 weights to fp32, renames Flax keys to PyTorch conventions
    (kernels, scales, embeddings, batch-norm statistics, ``weight_norm``
    parametrizations), validates shapes, and warns about unexpected/missing
    keys. Returns the same `pt_model` with weights loaded.

    Raises:
        ImportError: if PyTorch is not installed.
        ValueError: on a shape mismatch between checkpoint and model.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise

    # check if we have bf16 weights
    is_type_bfaa = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bfaa ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )

    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple ) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('''running_mean''',)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('''running_var''',)

        if "batch_stats" in flax_state:
            flax_key = '''.'''.join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            flax_key = '''.'''.join(flax_key_tuple )

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        special_pt_names = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            key_components = key.split('''.''' )
            name = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                name = key_components[-2] + '''_g'''
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                name = key_components[-2] + '''_v'''
            if name is not None:
                key_components = key_components[:-3] + [name]
                key_to_check = '''.'''.join(key_components )
                special_pt_names[key_to_check] = key

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
                    F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )

    pt_model.load_state_dict(pt_model_dict )

    # re-transform missing_keys to list
    missing_keys = list(missing_keys )

    if len(unexpected_keys ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
            F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    else:
        logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )

    if len(missing_keys ) > 0:
        logger.warning(
            F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
            F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
            ''' use it for predictions and inference.''' )
    else:
        logger.warning(
            F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
            '''If your task is similar to the task the model of the checkpoint was trained on, '''
            F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )

    return pt_model
| 464 | 0 |
from collections.abc import Iterable
from typing import Any
class A__ :
def __init__( self : int , _a : int | None = None ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =value
_SCREAMING_SNAKE_CASE =None # Added in order to delete a node easier
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
def __repr__( self : Any ) -> int:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"{self.value}": (self.left, self.right)} , indent=1 )
class BinarySearchTree:
    """A binary search tree over comparable values, built from `Node` objects."""

    def __init__( self : List[Any] , root : Node | None = None ) -> None:
        self.root = root

    def __str__( self : Optional[Any] ) -> str:
        """Render the tree via the root node's repr."""
        return str(self.root )

    def __reassign_nodes( self : str , node : Node , new_children : Node | None ) -> None:
        """Replace `node` with `new_children` in its parent (or as root)."""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node ):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right( self : List[str] , node : Node ) -> bool:
        """True when `node` is its parent's right child."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty( self : int ) -> bool:
        """True when the tree has no nodes."""
        return self.root is None

    def __insert( self : Any , value ) -> None:
        """Insert a single value, descending from the root to a free leaf slot."""
        new_node = Node(value )  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert( self : str , *values ) -> None:
        """Insert each of `values` in order."""
        for value in values:
            self.__insert(value )

    def search( self : int , value ) -> Node | None:
        """Return the node holding `value`, or None if absent.

        Raises:
            IndexError: when the tree is empty.
        """
        if self.empty():
            raise IndexError('''Warning: Tree is empty! please use another.''' )
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max( self : Dict , node : Node | None = None ) -> Node | None:
        """Return the maximum node of the subtree rooted at `node` (default: whole tree)."""
        if node is None:
            if self.root is None:
                return None
            node = self.root

        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min( self : Tuple , node : Node | None = None ) -> Node | None:
        """Return the minimum node of the tree (any `node` argument is ignored
        once the root exists — kept for interface compatibility)."""
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove( self : Tuple , value : int ) -> None:
        """Remove the node holding `value`, if present, preserving BST order."""
        node = self.search(value )  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node , None )
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node , node.right )
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node , node.left )
            else:
                tmp_node = self.get_max(
                    node.left )  # Gets the max value of the left branch
                self.remove(tmp_node.value )  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse( self : List[str] , node : Node | None ):
        """Yield nodes in preorder starting at `node`."""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left )
            yield from self.preorder_traverse(node.right )

    def traversal_tree( self : Optional[Any] , traversal_function=None ):
        """Traverse with `traversal_function(root)`, defaulting to preorder."""
        if traversal_function is None:
            return self.preorder_traverse(self.root )
        else:
            return traversal_function(self.root )

    def inorder( self : Tuple , arr : list , node : Node | None ) -> None:
        """Append the subtree's values to `arr` in sorted (inorder) order."""
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )

    def find_kth_smallest( self : Dict , k : int , node : Node ) -> int:
        """Return the k-th smallest value (1-based) in the subtree at `node`."""
        arr = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node):
    """Return the nodes of the subtree rooted at `curr_node` in postorder
    (left subtree, right subtree, then the node); [] for an empty subtree."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def lowerCamelCase( ):
    """Demonstrate BinarySearchTree operations on a fixed list of values,
    printing the tree, search results, extrema, and the tree after each removal."""
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print('''The value 6 exists''')
    else:
        print('''The value 6 doesn\'t exist''')

    if t.search(-1) is not None:
        print('''The value -1 exists''')
    else:
        print('''The value -1 doesn\'t exist''')

    if not t.empty():
        print('''Max Value: ''' ,t.get_max().value)  # type: ignore
        print('''Min Value: ''' ,t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
    # Run module doctests with verbose output when executed as a script.
    import doctest
    doctest.testmod(verbose=True)
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCamelCase__ = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None) -> np.ndarray:
    """Create a random int32 numpy tensor of `shape` with values in [0, vocab_size).

    Args:
        shape: iterable of dimension sizes.
        vocab_size: exclusive upper bound for the sampled token ids.
        rng: optional `random.Random` instance for reproducibility.
    """
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
def _lowerCamelCase(shape, rng=None):
    """Build a random 0/1 attention mask of `shape` via `ids_tensor`,
    forcing the last position of every row to 1."""
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class UpperCamelCase :
__UpperCamelCase = None
__UpperCamelCase = ()
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__snake_case = 2
__snake_case = inputs["input_ids"].shape[-1] // 2
__snake_case = inputs["input_ids"][:max_batch_size, :sequence_length]
__snake_case = jnp.ones_like(_lowerCAmelCase )
__snake_case = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__snake_case = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__snake_case = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase_ ( self : str ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
__snake_case = 0
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model_class.__name__[4:] # Skip the "Flax" at the beginning
__snake_case = getattr(_lowerCAmelCase ,_lowerCAmelCase )
__snake_case = pt_model_class(_lowerCAmelCase ).eval()
__snake_case = load_flax_weights_in_pytorch_model(_lowerCAmelCase ,flax_model.params )
__snake_case = flax_model.generate(_lowerCAmelCase ).sequences
__snake_case = pt_model.generate(torch.tensor(_lowerCAmelCase ,dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__snake_case = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() ,flax_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = True
__snake_case = max_length
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
__snake_case = 2
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = False
__snake_case = max_length
__snake_case = 2
__snake_case = 2
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] ,input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = True
__snake_case = max_length
__snake_case = 0.8
__snake_case = 10
__snake_case = 0.3
__snake_case = 1
__snake_case = 8
__snake_case = 9
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self : int ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
__snake_case = max_length
__snake_case = 1
__snake_case = 8
__snake_case = 9
for model_class in self.all_generative_model_classes:
__snake_case = model_class(_lowerCAmelCase )
__snake_case = model.generate(_lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
__snake_case = jit(model.generate )
__snake_case = jit_generate(_lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def UpperCamelCase_ ( self : Optional[int] ):
        """Beam-mode generation (extra config write vs the default test):
        sequences reach max_length and the jitted generate matches eager.

        NOTE(review): automated renaming collapsed distinct locals into
        `__snake_case`; the config writes below and `_lowerCAmelCase` are
        unbound. Restore the original names before running.
        """
        __snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
        __snake_case = max_length
        __snake_case = 2
        __snake_case = 1
        __snake_case = 8
        __snake_case = 9
        for model_class in self.all_generative_model_classes:
            __snake_case = model_class(_lowerCAmelCase )
            __snake_case = model.generate(_lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
            __snake_case = jit(model.generate )
            __snake_case = jit_generate(_lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def UpperCamelCase_ ( self : List[Any] ):
        """Generation with a left-padded attention mask (greedy path).

        NOTE(review): automated renaming collapsed distinct locals into
        `__snake_case` (the masked attention_mask, do_sample=False and
        max_length writes lost their targets) and `_lowerCAmelCase` is
        unbound. Restore the original names before running.
        """
        __snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
        # pad attention mask on the left
        __snake_case = attention_mask.at[(0, 0)].set(0 )
        __snake_case = False
        __snake_case = max_length
        for model_class in self.all_generative_model_classes:
            __snake_case = model_class(_lowerCAmelCase )
            __snake_case = model.generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
            __snake_case = jit(model.generate )
            __snake_case = jit_generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def UpperCamelCase_ ( self : int ):
        """Generation with a left-padded attention mask (sampling path).

        NOTE(review): automated renaming collapsed distinct locals into
        `__snake_case` (masked attention_mask, do_sample=True, max_length
        writes lost their targets) and `_lowerCAmelCase` is unbound.
        Restore the original names before running.
        """
        __snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
        # pad attention mask on the left
        __snake_case = attention_mask.at[(0, 0)].set(0 )
        __snake_case = True
        __snake_case = max_length
        for model_class in self.all_generative_model_classes:
            __snake_case = model_class(_lowerCAmelCase )
            __snake_case = model.generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
            __snake_case = jit(model.generate )
            __snake_case = jit_generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
    def UpperCamelCase_ ( self : Any ):
        """Generation with a left-padded attention mask (beam path, width 2).

        NOTE(review): automated renaming collapsed distinct locals into
        `__snake_case` (masked attention_mask, num_beams=2, max_length writes
        lost their targets) and `_lowerCAmelCase` is unbound. Restore the
        original names before running.
        """
        __snake_case , __snake_case , __snake_case , __snake_case = self._get_input_ids_and_config()
        # pad attention mask on the left
        __snake_case = attention_mask.at[(0, 0)].set(0 )
        __snake_case = 2
        __snake_case = max_length
        for model_class in self.all_generative_model_classes:
            __snake_case = model_class(_lowerCAmelCase )
            __snake_case = model.generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
            self.assertEqual(generation_outputs.shape[-1] ,_lowerCAmelCase )
            __snake_case = jit(model.generate )
            __snake_case = jit_generate(_lowerCAmelCase ,attention_mask=_lowerCAmelCase ).sequences
            self.assertListEqual(generation_outputs.tolist() ,jit_generation_outputs.tolist() )
@require_flax
class UpperCamelCase ( unittest.TestCase ):
    """Flax `generate()` integration test against a tiny checkpoint."""

    def UpperCamelCase_ ( self : int ):
        """Bad `generate` kwargs must raise with a message naming the bad key.

        NOTE(review): `_lowerCAmelCase` is a renaming artifact and is unbound —
        it originally carried the tokenized inputs / expected exception class /
        kwargs dict at the various call sites below. Restore the original names
        before running.
        """
        __snake_case = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
        __snake_case = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        __snake_case = "Hello world"
        __snake_case = tokenizer(_lowerCAmelCase ,return_tensors="np" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(_lowerCAmelCase ,"do_samples" ):
            model.generate(_lowerCAmelCase ,do_samples=_lowerCAmelCase )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(_lowerCAmelCase ,"foo" ):
            __snake_case = {"foo": "bar"}
            model.generate(_lowerCAmelCase ,**_lowerCAmelCase )
# | 524 | 0 |
'''simple docstring'''
def lowercase_(sorted_collection, item):
    """Iterative interpolation search over an ascending-sorted sequence.

    Fixes the obfuscation damage in the original: the duplicate parameter
    names were a SyntaxError, and every assignment was bound to a throwaway
    name (`_snake_case`) while the body read `left`/`right`/`point`.

    :param sorted_collection: ascending-sorted sequence of comparable items
    :param item: value to locate
    :return: index of ``item`` or ``None`` if absent
    """
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # All values in the window are equal: the interpolation formula
        # below would divide by zero, so resolve the window directly.
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            return None
        # Estimate the item's position by linear interpolation.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # A probe outside the list means the item cannot be present.
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:
            # Probe undershot the window: shrink towards it.
            right = left
            left = point
        elif point > right:
            # Probe overshot the window: shrink towards it.
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None
def lowercase_(sorted_collection, item, left, right):
    """Recursive interpolation search on ``sorted_collection`` within [left, right].

    Fixes the obfuscation damage in the original: duplicate parameter names
    (a SyntaxError) and recursive calls through a module-level name that the
    renaming destroyed. Recursion now goes through a local helper so the
    function is self-contained regardless of sibling-name shadowing.

    :return: index of ``item`` or ``None`` if absent
    """

    def _search(left, right):
        # Equal endpoints: the interpolation formula would divide by zero.
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        if sorted_collection[point] == item:
            return point
        if point < left:
            # Probe undershot the window: recurse on the gap.
            return _search(point, left)
        if point > right:
            # Probe overshot the window: recurse on the gap.
            return _search(right, point)
        if sorted_collection[point] > item:
            return _search(left, point - 1)
        return _search(point + 1, right)

    return _search(left, right)
def lowercase_(collection):
    """Validate that ``collection`` is ascending-sorted.

    Fixes the obfuscation damage in the original, whose body referenced
    ``collection`` while the parameter had been renamed (NameError).

    :raises ValueError: if the collection is not ascending sorted
    :return: ``True`` when the collection is sorted
    """
    if collection != sorted(collection):
        raise ValueError('Collection must be ascending sorted')
    return True
if __name__ == "__main__":
    import sys

    # NOTE(review): automated renaming bound every value below to `A`, so
    # `debug`, `collection`, `target` and `result` are unbound, and the search
    # and validation helpers defined above are all named `lowercase_` (mutually
    # shadowing). Restore the original identifiers (debug / collection /
    # target / result, interpolation_search / __assert_sorted) before running
    # this demo.
    A : Union[str, Any] = 0
    if debug == 1:
        A : List[str] = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('Sequence must be ascending sorted to apply interpolation search')
    A : Dict = 67
    A : Any = interpolation_search(collection, target)
    if result is not None:
        print(F'{target} found at positions: {result}')
    else:
        print('Not found')
# | 715 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase:
    """Builds tiny ViT configs and inputs shared by the ViT model tests.

    NOTE(review): restored from a block whose parameter and local names had
    been destroyed by automated renaming (the duplicate `__snake_case`
    parameters made the original a SyntaxError). Parameter names follow the
    right-hand sides and `self.*` reads that survived the renaming.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny ViT."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the small ViTConfig used throughout these tests."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # the renamed original obscured this value; encoder-only is the ViT default — confirm
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model forward pass produces the expected hidden-state shape."""
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Masked-image-modeling head reconstructs at input resolution (RGB and greyscale)."""
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head emits one logit per label (RGB and greyscale)."""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test-mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    """Common-suite tests for the ViT models.

    NOTE(review): automated renaming damaged this class. The six
    `_SCREAMING_SNAKE_CASE` class attributes overwrite one another (they were
    presumably all_model_classes / pipeline_model_mapping plus boolean test
    flags — confirm against upstream), every method shares the name
    `SCREAMING_SNAKE_CASE_` so only the last def survives at class-creation
    time, and locals such as `config` / `model` / `arg_names` / `__snake_case`
    are unbound. Restore the original identifiers before relying on it.
    """

    _SCREAMING_SNAKE_CASE = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    _SCREAMING_SNAKE_CASE = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    _SCREAMING_SNAKE_CASE = True
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        """setUp: build the model tester and the config tester."""
        _snake_case: Optional[int] = ViTModelTester(self )
        _snake_case: Union[str, Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """Input embeddings are an nn.Module; output embeddings are Linear or None."""
        _snake_case , _snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case: Optional[int] = model_class(__snake_case )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            _snake_case: Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        """forward() signature must start with `pixel_values`."""
        _snake_case , _snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case: int = model_class(__snake_case )
            _snake_case: List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _snake_case: List[Any] = [*signature.parameters.keys()]
            _snake_case: str = ['pixel_values']
            self.assertListEqual(arg_names[:1] , __snake_case )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        """Exercise the base model."""
        _snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__snake_case )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        """Exercise the masked-image-modeling head."""
        _snake_case: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        """Exercise the image-classification head."""
        _snake_case: str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__snake_case )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case: Any = ViTModel.from_pretrained(__snake_case )
            self.assertIsNotNone(__snake_case )
def lowercase_():
    """Load the standard COCO test-fixture image used by the integration tests.

    Fixes the obfuscation damage in the original, which bound the opened
    image to a throwaway name and then returned the unbound name ``image``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class lowerCamelCase(unittest.TestCase):
    """Slow integration tests that run real pretrained ViT checkpoints.

    NOTE(review): restored from automated renaming that bound every local to
    `_snake_case` and gave every method the same name; local names were
    reconstructed from the call sites that survived, and test-method names
    were chosen to describe what each body exercises — confirm against
    upstream.
    """

    @cached_property
    def default_image_processor(self):
        # Name is load-bearing: the tests below read self.default_image_processor.
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """Logits of the ImageNet head match the reference values."""
        model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 10_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2_744, 0.8_215, -0.0_836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        """Position-encoding interpolation handles a non-native resolution (480px)."""
        model = ViTModel.from_pretrained('facebook/dino-vits8').to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained('facebook/dino-vits8', size=4_80)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 36_01, 3_84))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """Half-precision inference runs without error under accelerate's device map."""
        model = ViTModel.from_pretrained('facebook/dino-vits8', torch_dtype=torch.float16, device_map='auto')
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
# | 273 | 0 |
import numpy as np
from PIL import Image
def UpperCamelCase(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """2D max pooling over a square matrix.

    Fixes the obfuscation damage in the original (duplicate parameter names
    were a SyntaxError; assignments had lost their targets).

    :param arr: square input matrix
    :param size: side length of the (square) pooling window
    :param stride: step between consecutive windows
    :raises ValueError: if the input is not square
    :return: pooled matrix of shape ((n - size) // stride + 1,) * 2
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def UpperCamelCase(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """2D average pooling over a square matrix (averages truncated to int).

    Fixes the obfuscation damage in the original (duplicate parameter names
    were a SyntaxError; assignments had lost their targets).

    :param arr: square input matrix
    :param size: side length of the (square) pooling window
    :param stride: step between consecutive windows
    :raises ValueError: if the input is not square
    :return: pooled matrix of shape ((n - size) // stride + 1,) * 2
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix (truncated to int)
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="""avgpooling""", verbose=True)

    # Loading the image
    # NOTE(review): automated renaming bound the opened image to
    # `_UpperCAmelCase`, so `image` below is unbound; likewise both pooling
    # functions above were renamed to `UpperCamelCase`, so `maxpooling` /
    # `avgpooling` are unbound here. Restore the original names before
    # running this demo.
    _UpperCAmelCase = Image.open("""path_to_image""")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
import asyncio  # | 558 |
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def UpperCamelCase(key, default=False):
    """Read boolean flag ``key`` from the environment, falling back to ``default``.

    Fixes the obfuscation damage in the original (duplicate parameter names
    were a SyntaxError; the body read ``key``/``default``/``_value`` that no
    longer existed).

    :param key: environment variable name
    :param default: value returned when the variable is unset
    :raises ValueError: if the variable is set to a non-boolean string
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
# NOTE(review): `parse_flag_from_env` is unbound here — the function defined
# above was renamed to `UpperCamelCase` — and every `_UpperCAmelCase` target
# below overwrites the previous one (these were distinct module-level flags
# and pytest marks, e.g. _run_slow_tests / require_lz4 / ...). Restore the
# original names before this module can be imported.
_UpperCAmelCase = parse_flag_from_env("""RUN_SLOW""", default=False)
_UpperCAmelCase = parse_flag_from_env("""RUN_REMOTE""", default=False)
_UpperCAmelCase = parse_flag_from_env("""RUN_LOCAL""", default=True)
_UpperCAmelCase = parse_flag_from_env("""RUN_PACKAGED""", default=True)

# Compression
_UpperCAmelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
_UpperCAmelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
_UpperCAmelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")

# Audio
_UpperCAmelCase = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
    reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)

# Beam
_UpperCAmelCase = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
    reason="""test requires apache-beam and a compatible dill version""",
)

# Dill-cloudpickle compatibility
_UpperCAmelCase = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("""0.3.2"""),
    reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)

# Windows
_UpperCAmelCase = pytest.mark.skipif(
    sys.platform == """win32""",
    reason="""test should not be run on Windows""",
)
def UpperCamelCase(test_case):
    """Skip `test_case` unless faiss is importable.

    NOTE(review): all of the decorator helpers below share the name
    `UpperCamelCase` after automated renaming, so only the last definition
    survives at module scope; each had also lost the binding between its
    parameter and the `test_case` its body used. The parameter/local bindings
    are restored here; the colliding function names are kept because call
    sites elsewhere were renamed the same way.
    """
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip('test requires faiss')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip('test requires regex')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip('test requires elasticsearch')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip('test requires sqlalchemy')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless PyTorch is available."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip('test requires PyTorch')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip('test requires TensorFlow')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip('test requires JAX')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip('test requires Pillow')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers')(test_case)
    else:
        return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken')(test_case)
    else:
        return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy')(test_case)
    else:
        return test_case


def UpperCamelCase(model):
    """Decorator factory: skip unless spacy and the given spacy model are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip('test requires spacy')(test_case)
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def UpperCamelCase(test_case):
    """Skip `test_case` unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark')(test_case)
    else:
        return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark')(test_case)
    else:
        return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless slow tests are enabled (RUN_SLOW)."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip('test is slow')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless local tests are enabled (RUN_LOCAL)."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip('test is local')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless packaged tests are enabled (RUN_PACKAGED)."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip('test is packaged')(test_case)
    return test_case


def UpperCamelCase(test_case):
    """Skip `test_case` unless remote tests are enabled (RUN_REMOTE)."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip('test requires remote')(test_case)
    return test_case
def UpperCamelCase ( *__lowercase : Optional[int] ):
'''simple docstring'''
def decorate(cls : List[Any] ):
for name, fn in cls.__dict__.items():
if callable(__lowercase ) and name.startswith('test' ):
for decorator in decorators:
A_ : str = decorator(__lowercase )
setattr(cls ,__lowercase ,__lowercase )
return cls
return decorate
class UpperCAmelCase ( __A ):
    """Marker exception for requests that would hang forever in offline mode.

    NOTE(review): the base class `__A` is a renaming artifact and is unbound
    at module scope — presumably `Exception`; confirm and restore. The
    offline context manager below raises this (as
    `RequestWouldHangIndefinitelyError` before renaming).
    """

    pass
class UpperCAmelCase ( __A ):
    """Offline-simulation modes (enum-like).

    NOTE(review): the base `__A` is unbound (presumably `enum.Enum`), and the
    three class attributes were all renamed to one identifier, so only the
    last value (2) survives. The offline context manager below expects members
    named CONNECTION_FAILS / CONNECTION_TIMES_OUT /
    HF_DATASETS_OFFLINE_SET_TO_1 — restore them before use.
    """

    lowerCamelCase_ = 0
    lowerCamelCase_ = 1
    lowerCamelCase_ = 2
@contextmanager
def UpperCamelCase(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-1_6):
    """Simulate an offline environment for the duration of the ``with`` block.

    Fixes the obfuscation damage in the original (duplicate parameter names
    were a SyntaxError; locals and attribute writes had lost their targets).
    The inner-function parameter names and the ``e.request.url`` restore are
    reconstructed from the surviving call shapes — confirm against upstream.

    :param mode: one of the OfflineSimulationMode members
    :param timeout: timeout forced onto requests in CONNECTION_TIMES_OUT mode
    """
    online_request = requests.Session().request

    def timeout_request(self, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get('timeout') is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.'''
            )
        kwargs['timeout'] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('10.255.255.1', f'''OfflineMock[{url}]'''),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('Offline mode is enabled.', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE', True):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.')
@contextmanager
def UpperCamelCase(*args, **kwargs):
    """Run the ``with`` body inside a fresh temporary working directory.

    Fixes the obfuscation damage in the original (duplicate star-parameter
    names were a SyntaxError; the saved working directory and the temp-dir
    name had lost their bindings). Extra args are forwarded to
    ``tempfile.TemporaryDirectory``; the previous cwd is always restored.
    """
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def UpperCamelCase():
    """Assert that the ``with`` body increases pyarrow's allocated memory.

    Fixes the obfuscation damage in the original, which bound the baseline
    measurement to a throwaway name and then read the unbound
    ``previous_allocated_memory``.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def UpperCamelCase():
    """Assert that the ``with`` body does NOT increase pyarrow's allocated memory.

    Fixes the obfuscation damage in the original, which bound the baseline
    measurement to a throwaway name and then read the unbound
    ``previous_allocated_memory``.
    """
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def UpperCamelCase(rng1, rng2):
    """True iff two RNGs would produce the same next 10 draws (checked on copies).

    Fixes the obfuscation damage in the original, whose duplicate parameter
    names were a SyntaxError. Both generators are deep-copied so their state
    is not consumed.
    """
    return deepcopy(rng1).integers(0, 1_00, 10).tolist() == deepcopy(rng2).integers(0, 1_00, 10).tolist()
def UpperCamelCase(func):
    """Wrap ``func`` so transient HTTP 500/502 errors xfail instead of failing.

    Fixes the obfuscation damage in the original, whose body read ``func``
    after the parameter had been renamed away.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Server-side hiccups are not this test's fault: mark expected-fail.
            if str(err).startswith('500') or str(err).startswith('502'):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class UpperCAmelCase:
    """Record of a finished subprocess: return code plus captured stdout/stderr.

    Fixes the obfuscation damage in the original ``__init__`` (duplicate
    parameter names were a SyntaxError and the attribute writes had lost
    their targets).
    """

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode  # process exit status
        self.stdout = stdout          # captured stdout lines
        self.stderr = stderr          # captured stderr lines
async def UpperCamelCase(stream, callback):
    """Forward lines from an async ``stream`` to ``callback`` until EOF.

    Fixes the obfuscation damage in the original (duplicate parameter names
    were a SyntaxError; the body read ``stream``/``callback``/``line`` names
    that no longer existed).
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            # An empty read signals EOF.
            break
async def UpperCamelCase(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Run ``cmd`` asynchronously, teeing its stdout/stderr line by line.

    Fixes the obfuscation damage in the original (duplicate parameter names
    were a SyntaxError; locals had lost their bindings). NOTE(review): the
    helpers ``_read_stream`` and ``_RunOutput`` referenced below kept their
    original names in this block but the sibling definitions were renamed —
    restore those definitions' names as well.
    """
    if echo:
        print('\nRunning: ', ' '.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('utf-8').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='stdout:')),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='stderr:')),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def UpperCamelCase ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """Run `cmd` to completion via the async streamer and validate its result.

    Raises RuntimeError when the command exits non-zero (with the combined
    stderr) or when it produced no output at all — the latter guards tests that
    rely on the remote side doing the actual checking.

    Fix: the original declared six identically-named parameters (a SyntaxError)
    and discarded `loop`, `result`, `cmd_str` and `stderr` into a throwaway name.

    NOTE(review): `_stream_subprocess` was already referenced by the original
    body but is defined elsewhere under a machine-mangled name — confirm it
    resolves before use.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd ,env=env ,stdin=stdin ,timeout=timeout ,quiet=quiet ,echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''' )
    return result
def UpperCamelCase ( ):
    """Return this pytest-xdist worker's numeric id (0 when not running under xdist).

    Derived from the PYTEST_XDIST_WORKER environment variable, e.g. "gw2" -> 2.

    Fix: the original assigned both intermediate values to a throwaway name and
    then referenced the undefined `__lowercase`, raising NameError at runtime.
    """
    worker = os.environ.get('PYTEST_XDIST_WORKER' ,'gw0' )
    worker_id = re.sub(r'^gw' ,'' ,worker ,0 ,re.M )
    return int(worker_id )
def UpperCamelCase ( ):
    """Return a torch.distributed master port unique to this pytest-xdist worker.

    Offsets a base port by the worker id so concurrent xdist workers never race
    for the same port.

    Fix: the original discarded both values into a throwaway name and then
    returned the undefined `port + uniq_delta`, raising NameError at runtime.

    NOTE(review): `pytest_xdist_worker_id` was already referenced by the
    original body but is defined elsewhere under a machine-mangled name —
    confirm it resolves before use.
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 558 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCAmelCase : List[str] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class snake_case__ (unittest.TestCase ):
    """Tests for the `text-classification` pipeline against tiny and full PT/TF checkpoints.

    NOTE(review): this file has been machine-renamed. Repeated assignments to the
    throwaway name ``a`` discard the intended local names (``text_classifier``,
    ``outputs``, ``model``, ...), so many references below are unbound at runtime;
    the two class attributes share one name so the PT mapping is shadowed by the
    TF one; and ``model_mapping`` / ``tf_model_mapping`` / ``_TO_SKIP`` / ``N``
    are referenced but never bound. Restore the original identifiers before
    relying on these tests.
    """
    SCREAMING_SNAKE_CASE_ : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    SCREAMING_SNAKE_CASE_ : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # Drop model types whose inputs differ from standard text models.
    if model_mapping is not None:
        SCREAMING_SNAKE_CASE_ : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        SCREAMING_SNAKE_CASE_ : int = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
        # Tiny PT checkpoint: single input, batched input, top_k, and the legacy
        # return_all_scores behaviours.
        a = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
        a = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
        a = text_classifier("This is great !" , top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
        a = text_classifier(["This is great !", "This is bad"] , top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase ) , [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ] , )
        a = text_classifier("This is great !" , top_k=1 )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
        # Legacy behavior
        a = text_classifier("This is great !" , return_all_scores=__lowerCamelCase )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
        a = text_classifier("This is great !" , return_all_scores=__lowerCamelCase )
        self.assertEqual(
            nested_simplify(__lowerCamelCase ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
        a = text_classifier(["This is great !", "Something else"] , return_all_scores=__lowerCamelCase )
        self.assertEqual(
            nested_simplify(__lowerCamelCase ) , [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ] , )
        a = text_classifier(["This is great !", "Something else"] , return_all_scores=__lowerCamelCase )
        self.assertEqual(
            nested_simplify(__lowerCamelCase ) , [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ] , )

    @require_torch
    def __UpperCAmelCase ( self : str ) -> str:
        # Explicit CPU device placement must not change the tiny-model results.
        import torch
        a = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
        a = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )

    @require_tf
    def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
        # Tiny TF checkpoint sanity check.
        a = pipeline(
            task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
        a = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "LABEL_0", "score": 0.504}] )

    @slow
    @require_torch
    def __UpperCAmelCase ( self : List[Any] ) -> int:
        # Default full-size PT sentiment model: positive / negative / neutral-ish text.
        a = pipeline("text-classification" )
        a = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
        a = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
        a = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.988}] )

    @slow
    @require_tf
    def __UpperCAmelCase ( self : Tuple ) -> List[str]:
        # Default full-size TF sentiment model: same expectations as the PT run.
        a = pipeline("text-classification" , framework="tf" )
        a = text_classifier("This is great !" )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
        a = text_classifier("This is bad !" )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
        a = text_classifier("Birds are a type of animal" )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": "POSITIVE", "score": 0.988}] )

    def __UpperCAmelCase ( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : int ) -> Optional[int]:
        # Common-test hook: build a pipeline from (model, tokenizer) plus sample inputs.
        # NOTE(review): the repeated `__lowerCamelCase` parameter name is a SyntaxError.
        a = TextClassificationPipeline(model=__lowerCamelCase , tokenizer=__lowerCamelCase )
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ) -> Dict:
        # Common-test hook: exercise single / batched / text-pair calls and top_k=None.
        a = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        a = "HuggingFace is in"
        a = text_classifier(__lowerCamelCase )
        self.assertEqual(nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] )
        # NOTE(review): `idalabel` below is presumably `id2label` — confirm against
        # the original test before trusting these membership checks.
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
        a = ["HuggingFace is in ", "Paris is in France"]
        a = text_classifier(__lowerCamelCase )
        self.assertEqual(
            nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}, {"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        a = text_classifier(__lowerCamelCase , top_k=__lowerCamelCase )
        a = len(model.config.idalabel.values() )
        # NOTE(review): `N` below is unbound — the length above was assigned to `a`.
        self.assertEqual(
            nested_simplify(__lowerCamelCase ) , [[{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] * N, [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] * N] , )
        a = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        a = text_classifier(__lowerCamelCase )
        self.assertEqual(
            nested_simplify(__lowerCamelCase ) , {"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )} , )
        self.assertTrue(outputs["label"] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        a = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(__lowerCamelCase ):
            text_classifier(__lowerCamelCase )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        a = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
        self.assertEqual(
            nested_simplify(__lowerCamelCase ) , [{"label": ANY(__lowerCamelCase ), "score": ANY(__lowerCamelCase )}] , )
        self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for Longformer, covering both the slow (Python) and fast
    (Rust) implementations: full tokenization against a toy BPE vocab, special
    tokens, prefix-space handling, and offset-mapping behaviour.

    NOTE(review): this file has been machine-renamed. Repeated assignments to
    the throwaway name ``a`` discard the intended local names (``tokenizer``,
    ``tokens``, ``encoded``, ``tokenizer_r``, ...), so many references below are
    unbound at runtime; several ``def``s also repeat a parameter name, which is
    a SyntaxError. Restore the original identifiers before relying on these
    tests.
    """
    SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
    SCREAMING_SNAKE_CASE_ : Optional[int] = True
    SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
    SCREAMING_SNAKE_CASE_ : str = True

    def __UpperCAmelCase ( self : Optional[int] ) -> str:
        # Write a minimal BPE vocab/merges pair to a temp dir for the tests.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        a = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        a = {"unk_token": "<unk>"}
        a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(__lowerCamelCase ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(__lowerCamelCase ) )

    def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
        # Instantiate the slow tokenizer from the files written in setUp.
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )

    def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
        # Instantiate the fast (Rust) tokenizer from the files written in setUp.
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )

    def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
        # Provide (input, expected output) text for the common tokenizer tests.
        a = "lower newer"
        a = "lower newer"
        return input_text, output_text

    def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        # Tokenize a short string and check both the tokens and their ids.
        a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        a = "lower newer"
        a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        a = tokenizer.tokenize(__lowerCamelCase )  # , add_prefix_space=True)
        self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
        a = tokens + [tokenizer.unk_token]
        a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )

    def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
        # Known token ids produced by `encode` with special tokens added.
        a = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )

    @slow
    def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        # build_inputs_with_special_tokens must match encoding with specials enabled.
        a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
        a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
        a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
        a = tokenizer.encode(
            "sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
        a = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
        a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
        a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def __UpperCAmelCase ( self : Any ) -> str:
        # Prefix-space handling and spaces after special tokens (incl. <mask>).
        a = self.get_tokenizer()
        a = "Encode this sequence."
        a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
        # Testing encoder arguments
        a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
        a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
        a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
        a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(__lowerCamelCase , __lowerCamelCase )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
        a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
        # Testing spaces after special tokens
        a = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} )  # mask token has a left space
        a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
        a = "Encode <mask> sequence"
        a = "Encode <mask>sequence"
        a = tokenizer.encode(__lowerCamelCase )
        a = encoded.index(__lowerCamelCase )
        a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(__lowerCamelCase , __lowerCamelCase )
        a = tokenizer.encode(__lowerCamelCase )
        a = encoded.index(__lowerCamelCase )
        a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )

    def __UpperCAmelCase ( self : str ) -> List[str]:
        # Intentionally skipped in the original test suite.
        pass

    def __UpperCAmelCase ( self : int ) -> int:
        # Slow vs fast tokenizers must agree on ids/masks for text containing <mask>.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
                a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
                a = "A, <mask> AllenNLP sentence."
                a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
                a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    __lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )

    def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
        # Serialized pre-tokenizer/post-processor state must reflect the flags.
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            a = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
            a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
            self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
            self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )

    def __UpperCAmelCase ( self : List[Any] ) -> Dict:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                a = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                a = f"""{text_of_1_token} {text_of_1_token}"""
                a = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
                a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
                a = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
                a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
                a = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
                a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
                a = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
                a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
                a = f""" {text}"""
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                a = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
                a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
                a = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
                a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
                a = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
                a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_SCREAMING_SNAKE_CASE : List[str] = random.Random()
def _lowercase ( __lowerCamelCase : Dict ,__lowerCamelCase : Optional[Any]=1.0 ,__lowerCamelCase : Optional[int]=None ,__lowerCamelCase : Tuple=None ) -> str:
'''simple docstring'''
if rng is None:
UpperCamelCase__ : Optional[int] = global_rng
UpperCamelCase__ : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCamelCase__ ( unittest.TestCase ):
    """Parameter holder / input builder for the TVLT feature-extractor tests.

    NOTE(review): machine renaming broke this helper: each ``def`` below repeats
    the parameter name ``__lowerCamelCase`` (a SyntaxError), the ``__init__``
    body assigns to the throwaway name ``UpperCamelCase__`` instead of ``self``
    attributes, and the two methods named ``__lowercase`` shadow each other.
    Restore the original identifiers before use.
    """
    def __init__( self : Optional[Any], __lowerCamelCase : List[Any], __lowerCamelCase : Tuple=7, __lowerCamelCase : int=4_00, __lowerCamelCase : Tuple=20_00, __lowerCamelCase : Dict=20_48, __lowerCamelCase : List[Any]=1_28, __lowerCamelCase : List[Any]=1, __lowerCamelCase : List[str]=5_12, __lowerCamelCase : List[str]=30, __lowerCamelCase : Optional[int]=4_41_00, ) -> Tuple:
        # Intended: store batch/sequence/spectrogram hyper-parameters on self.
        UpperCamelCase__ : Any = parent
        UpperCamelCase__ : Optional[Any] = batch_size
        UpperCamelCase__ : List[Any] = min_seq_length
        UpperCamelCase__ : List[str] = max_seq_length
        UpperCamelCase__ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        UpperCamelCase__ : Any = spectrogram_length
        UpperCamelCase__ : str = feature_size
        UpperCamelCase__ : Union[str, Any] = num_audio_channels
        UpperCamelCase__ : Dict = hop_length
        UpperCamelCase__ : int = chunk_length
        UpperCamelCase__ : Optional[Any] = sampling_rate
    def __lowercase( self : Dict ) -> Union[str, Any]:
        # Constructor kwargs for TvltFeatureExtractor, built from self attributes.
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }
    def __lowercase( self : int, __lowerCamelCase : Any=False, __lowerCamelCase : List[Any]=False ) -> Optional[Any]:
        # Build a batch of (optionally equal-length / numpified) float speech inputs.
        def _flatten(__lowerCamelCase : Optional[int] ):
            return list(itertools.chain(*__lowerCamelCase ) )
        if equal_length:
            UpperCamelCase__ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            UpperCamelCase__ : Dict = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
            ]
        if numpify:
            UpperCamelCase__ : Tuple = [np.asarray(__lowerCamelCase ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class UpperCamelCase__ ( __lowerCamelCase , unittest.TestCase ):
    """Feature-extraction tests for TVLT: config save/load round-trips (dir and
    JSON), batching/masking of raw audio, and an integration check against a
    librispeech sample.

    NOTE(review): machine renaming assigns most intended locals to the throwaway
    name ``UpperCamelCase__`` (e.g. the tester instance, ``feat_extract_first``,
    ``encoded_audios``), leaving later references unbound at runtime; restore
    the original names before relying on these tests.
    """
    a__ : Optional[int] = TvltFeatureExtractor
    def __lowercase( self : Any ) -> Tuple:
        # Intended: build the tester helper defined above.
        UpperCamelCase__ : int = TvltFeatureExtractionTester(self )
    def __lowercase( self : Optional[int] ) -> Optional[int]:
        # The extractor must expose all configuration attributes.
        UpperCamelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(__lowerCamelCase, '''spectrogram_length''' ) )
        self.assertTrue(hasattr(__lowerCamelCase, '''feature_size''' ) )
        self.assertTrue(hasattr(__lowerCamelCase, '''num_audio_channels''' ) )
        self.assertTrue(hasattr(__lowerCamelCase, '''hop_length''' ) )
        self.assertTrue(hasattr(__lowerCamelCase, '''chunk_length''' ) )
        self.assertTrue(hasattr(__lowerCamelCase, '''sampling_rate''' ) )
    def __lowercase( self : Dict ) -> int:
        # save_pretrained / from_pretrained round-trip preserves the config
        # (mel_filters compared numerically, the rest by dict equality).
        UpperCamelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase__ : Union[str, Any] = feat_extract_first.save_pretrained(__lowerCamelCase )[0]
            check_json_file_has_correct_format(__lowerCamelCase )
            UpperCamelCase__ : Dict = self.feature_extraction_class.from_pretrained(__lowerCamelCase )
        UpperCamelCase__ : List[Any] = feat_extract_first.to_dict()
        UpperCamelCase__ : Dict = feat_extract_second.to_dict()
        UpperCamelCase__ : Dict = dict_first.pop('''mel_filters''' )
        UpperCamelCase__ : str = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(__lowerCamelCase, __lowerCamelCase ) )
        self.assertEqual(__lowerCamelCase, __lowerCamelCase )
    def __lowercase( self : Optional[int] ) -> Tuple:
        # to_json_file / from_json_file round-trip preserves the config.
        UpperCamelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            UpperCamelCase__ : List[Any] = os.path.join(__lowerCamelCase, '''feat_extract.json''' )
            feat_extract_first.to_json_file(__lowerCamelCase )
            UpperCamelCase__ : Any = self.feature_extraction_class.from_json_file(__lowerCamelCase )
        UpperCamelCase__ : Optional[Any] = feat_extract_first.to_dict()
        UpperCamelCase__ : Union[str, Any] = feat_extract_second.to_dict()
        UpperCamelCase__ : Any = dict_first.pop('''mel_filters''' )
        UpperCamelCase__ : Optional[Any] = dict_second.pop('''mel_filters''' )
        self.assertTrue(np.allclose(__lowerCamelCase, __lowerCamelCase ) )
        self.assertEqual(__lowerCamelCase, __lowerCamelCase )
    def __lowercase( self : Optional[int] ) -> List[Any]:
        # Initialize feature_extractor
        UpperCamelCase__ : str = self.feature_extraction_class(**self.feat_extract_dict )
        # create three inputs of length 800, 1000, and 1200
        UpperCamelCase__ : Any = [floats_list((1, x) )[0] for x in range(8_00, 14_00, 2_00 )]
        UpperCamelCase__ : Dict = [np.asarray(__lowerCamelCase ) for speech_input in speech_inputs]
        # Test not batched input
        UpperCamelCase__ : Any = feature_extractor(np_speech_inputs[0], return_tensors='''np''', sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test batched
        UpperCamelCase__ : List[str] = feature_extractor(__lowerCamelCase, return_tensors='''np''', sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test audio masking
        UpperCamelCase__ : Optional[int] = feature_extractor(
            __lowerCamelCase, return_tensors='''np''', sampling_rate=4_41_00, mask_audio=__lowerCamelCase ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
        # Test 2-D numpy arrays are batched.
        UpperCamelCase__ : Union[str, Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        UpperCamelCase__ : Any = np.asarray(__lowerCamelCase )
        UpperCamelCase__ : str = feature_extractor(__lowerCamelCase, return_tensors='''np''', sampling_rate=4_41_00 ).audio_values
        self.assertTrue(encoded_audios.ndim == 4 )
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def __lowercase( self : int, __lowerCamelCase : str ) -> str:
        # Load `num_samples` decoded librispeech clips for the integration test.
        UpperCamelCase__ : Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''' )
        # automatic decoding with librispeech
        UpperCamelCase__ : Union[str, Any] = ds.sort('''id''' ).select(range(__lowerCamelCase ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def __lowercase( self : List[Any] ) -> Optional[int]:
        # Integration check: known spectrogram shape and values on a real sample.
        UpperCamelCase__ : Optional[int] = self._load_datasamples(1 )
        UpperCamelCase__ : Tuple = TvltFeatureExtractor()
        UpperCamelCase__ : List[str] = feature_extractor(__lowerCamelCase, return_tensors='''pt''' ).audio_values
        self.assertEquals(audio_values.shape, (1, 1, 1_92, 1_28) )
        UpperCamelCase__ : Optional[int] = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], __lowerCamelCase, atol=1e-4 ) )
| 344 |
def _lowercase ( __lowerCamelCase : int ) -> int:
'''simple docstring'''
if not isinstance(__lowerCamelCase ,__lowerCamelCase ):
raise ValueError('''multiplicative_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''multiplicative_persistence() does not accept negative values''' )
UpperCamelCase__ : Optional[int] = 0
UpperCamelCase__ : List[Any] = str(__lowerCamelCase )
while len(__lowerCamelCase ) != 1:
UpperCamelCase__ : int = [int(__lowerCamelCase ) for i in num_string]
UpperCamelCase__ : int = 1
for i in range(0 ,len(__lowerCamelCase ) ):
total *= numbers[i]
UpperCamelCase__ : Optional[int] = str(__lowerCamelCase )
steps += 1
return steps
def _lowercase ( __lowerCamelCase : int ) -> int:
'''simple docstring'''
if not isinstance(__lowerCamelCase ,__lowerCamelCase ):
raise ValueError('''additive_persistence() only accepts integral values''' )
if num < 0:
raise ValueError('''additive_persistence() does not accept negative values''' )
UpperCamelCase__ : Union[str, Any] = 0
UpperCamelCase__ : Any = str(__lowerCamelCase )
while len(__lowerCamelCase ) != 1:
UpperCamelCase__ : List[Any] = [int(__lowerCamelCase ) for i in num_string]
UpperCamelCase__ : List[Any] = 0
for i in range(0 ,len(__lowerCamelCase ) ):
total += numbers[i]
UpperCamelCase__ : int = str(__lowerCamelCase )
steps += 1
return steps
if __name__ == "__main__":
    # Run the doctests embedded in the persistence functions above when this
    # module is executed directly.
    import doctest

    doctest.testmod()
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class _lowerCAmelCase ( __snake_case ):
    """InstructBLIP vision-encoder configuration (ViT-style tower).

    Stores the architecture hyper-parameters (hidden sizes, depth, attention
    heads, image/patch geometry, activation and norm/dropout settings) and
    provides a loader that can unwrap the ``vision_config`` sub-dictionary from
    a combined InstructBLIP configuration.

    Fixes: the original ``__init__`` declared twelve parameters all named ``a``
    (a SyntaxError) and discarded every value into the local ``lowercase``
    instead of binding ``self`` attributes; the model-type attribute was
    assigned to a mangled private name although the loader checks
    ``cls.model_type``. Parameter names restored from the assignment targets.
    """

    # Identifier checked by from_pretrained and used by the auto-config machinery.
    model_type = '''instructblip_vision_model'''

    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1E-6 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ) -> None:
        """Store the vision-tower hyper-parameters; extra kwargs go to the base config."""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def _lowerCAmelCase ( cls , pretrained_model_name_or_path , **kwargs ) -> "PretrainedConfig":
        """Load this config from a checkpoint, unwrapping a full InstructBLIP
        config to its ``vision_config`` sub-dict when needed; warn when the
        stored model type does not match this class."""
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('''model_type''' ) == "instructblip":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig(PretrainedConfig):
    r"""
    Configuration for the InstructBLIP Q-Former (a BERT-style querying transformer).

    Defaults reproduce the Salesforce InstructBLIP checkpoints. The original
    block was mangled: every ``__init__`` parameter was named ``a`` (a
    SyntaxError) and attribute assignments never reached ``self``.

    Args:
        vocab_size: Size of the token vocabulary.
        hidden_size: Dimensionality of the encoder layers.
        num_hidden_layers: Number of transformer layers.
        num_attention_heads: Number of attention heads per layer.
        intermediate_size: Dimensionality of the feed-forward layers.
        hidden_act: Activation function of the feed-forward layers.
        hidden_dropout_prob: Dropout ratio of the hidden states.
        attention_probs_dropout_prob: Dropout ratio of attention probabilities.
        max_position_embeddings: Maximum supported sequence length.
        initializer_range: Std-dev of the truncated-normal weight init.
        layer_norm_eps: Epsilon used by the layer-norm layers.
        pad_token_id: Id of the padding token.
        position_embedding_type: Position-embedding scheme (e.g. "absolute").
        cross_attention_frequency: Insert cross-attention every N layers.
        encoder_hidden_size: Hidden size of the vision encoder attended to.
    """

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load this Q-Former config, unwrapping it from a composite InstructBlip config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # When loading from a full InstructBlipConfig, pick out the nested Q-Former section.
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    r"""
    Composite configuration for an InstructBLIP model: vision encoder,
    Q-Former, and a language model chosen via ``text_config['model_type']``
    (defaults to OPT).

    The original block was mangled: all four ``__init__`` parameters shared
    the name ``a`` (a SyntaxError) and the ``self.`` attribute assignments
    were lost; this restores the evident intent.

    Args:
        vision_config: dict of :class:`InstructBlipVisionConfig` kwargs.
        qformer_config: dict of :class:`InstructBlipQFormerConfig` kwargs.
        text_config: dict of language-model config kwargs (must contain
            ``model_type``; defaults to ``"opt"``).
        num_query_tokens: Number of learned query tokens fed to the Q-Former.
    """

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        # Mirror a few language-model properties on the composite config.
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, expanding the three nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform 2D max pooling on a square matrix.

    The original block was mangled: the output array, the result of each
    window, and the column-index resets were all bound to one throwaway name,
    so ``updated_arr`` was undefined at the ``return``. This restores the
    intended algorithm.

    :param arr: square 2D input matrix (anything ``np.array`` accepts)
    :param size: side length of the pooling window
    :param stride: number of pixels the window shifts per step
    :return: matrix holding the maximum of each window position
    :raises ValueError: if ``arr`` is not square
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")

    # One output cell per full window position along each axis.
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    for mat_i, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
        for mat_j, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
    return updated_arr


# Backward-compatible alias for the previous (mangled) public name.
A_ = maxpooling
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform 2D average pooling on a square matrix.

    Each window's average is truncated to ``int`` (matching the original
    ``int(np.average(...))``). The original block was mangled exactly like
    the max-pooling variant — results and index resets bound to a throwaway
    name, ``updated_arr`` undefined at the ``return`` — and both functions
    shared the name ``A_``; this restores the intended algorithm.

    :param arr: square 2D input matrix (anything ``np.array`` accepts)
    :param size: side length of the pooling window
    :param stride: number of pixels the window shifts per step
    :return: matrix holding the truncated average of each window position
    :raises ValueError: if ``arr`` is not square
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")

    # One output cell per full window position along each axis.
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    for mat_i, i in enumerate(range(0, arr.shape[0] - size + 1, stride)):
        for mat_j, j in enumerate(range(0, arr.shape[1] - size + 1, stride)):
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
    return updated_arr


# Backward-compatible alias; matches the original file, where the second
# (average-pooling) definition of ``A_`` shadowed the first.
A_ = avgpooling
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # Load the image from disk (placeholder path — replace with a real file).
    # The original bound the image to a mangled name, leaving `image` undefined.
    image = Image.open("path_to_image")

    # Convert the image to a numpy array, max-pool it, and display the result.
    # The image must be square (the pooling helpers require a square matrix).
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Same pipeline with the average-pooling variant.
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
    '''Fast (tiny-model, CPU) tests for ``StableDiffusionInstructPix2PixPipeline``.

    NOTE(review): identifiers in this class look machine-mangled — locals are
    repeatedly bound to the single name ``lowercase`` while later statements
    reference the intended names (``unet``, ``scheduler``, ``sd_pipe``,
    ``image_slice``, ...), and some signatures repeat a parameter name
    (a SyntaxError). The mangled names must be restored before these tests
    can run; the docstrings below describe the evident intent only.
    '''
    lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline
    lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
    lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def lowerCamelCase_ ( self : Optional[int] ):
        '''Build tiny dummy pipeline components (unet/scheduler/vae/text encoder/tokenizer).'''
        torch.manual_seed(0 )
        lowercase : Any =UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        lowercase : Optional[int] =PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
        torch.manual_seed(0 )
        lowercase : Union[str, Any] =AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowercase : Optional[int] =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        lowercase : int =CLIPTextModel(UpperCAmelCase__ )
        lowercase : List[Any] =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        lowercase : List[str] ={
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int]=0 ):
        '''Build the standard call kwargs (prompt, seed-controlled generator, random RGB image).'''
        lowercase : Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ )
        lowercase : str =image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase : Any =Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert('''RGB''' )
        if str(UpperCAmelCase__ ).startswith('''mps''' ):
            lowercase : Dict =torch.manual_seed(UpperCAmelCase__ )
        else:
            lowercase : List[Any] =torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
        lowercase : Dict ={
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''image_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs
    def lowerCamelCase_ ( self : List[Any] ):
        '''Smoke-test the default pipeline output shape and a fixed image slice.'''
        lowercase : Tuple ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowercase : str =self.get_dummy_components()
        lowercase : str =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
        lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        lowercase : Tuple =self.get_dummy_inputs(UpperCAmelCase__ )
        lowercase : Any =sd_pipe(**UpperCAmelCase__ ).images
        lowercase : int =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowercase : Union[str, Any] =np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''Check output against a reference slice when a negative prompt is supplied.'''
        lowercase : str ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowercase : Tuple =self.get_dummy_components()
        lowercase : Optional[int] =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
        lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
        lowercase : Tuple ='''french fries'''
        lowercase : Dict =sd_pipe(**UpperCAmelCase__ , negative_prompt=UpperCAmelCase__ )
        lowercase : List[str] =output.images
        lowercase : Tuple =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowercase : Any =np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def lowerCamelCase_ ( self : Dict ):
        '''Check batched operation: two prompts with a duplicated input image.'''
        lowercase : List[str] ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowercase : str =self.get_dummy_components()
        lowercase : Any =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
        lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase__ )
        lowercase : Any =[inputs['''prompt''']] * 2
        lowercase : int =np.array(inputs['''image'''] ).astype(np.floataa ) / 2_55.0
        lowercase : Tuple =torch.from_numpy(UpperCAmelCase__ ).unsqueeze(0 ).to(UpperCAmelCase__ )
        lowercase : List[Any] =image / 2 + 0.5
        lowercase : List[str] =image.permute(0 , 3 , 1 , 2 )
        lowercase : List[str] =image.repeat(2 , 1 , 1 , 1 )
        lowercase : List[Any] =sd_pipe(**UpperCAmelCase__ ).images
        lowercase : Optional[int] =image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        lowercase : Union[str, Any] =np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''Check output against a reference slice with the Euler-Ancestral scheduler.'''
        lowercase : Any ='''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowercase : Union[str, Any] =self.get_dummy_components()
        lowercase : Any =EulerAncestralDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
        lowercase : Any =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
        lowercase : Union[str, Any] =sd_pipe.to(UpperCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        lowercase : Dict =self.get_dummy_inputs(UpperCAmelCase__ )
        lowercase : Any =sd_pipe(**UpperCAmelCase__ ).images
        lowercase : Any =image[0, -3:, -3:, -1]
        lowercase : List[str] =[round(UpperCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(UpperCAmelCase__ ) for x in slice] ) )
        assert image.shape == (1, 32, 32, 3)
        lowercase : Dict =np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def lowerCamelCase_ ( self : List[Any] ):
        '''Delegate to the mixin's batch-vs-single consistency check with a looser tolerance.'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
    def lowerCamelCase_ ( self : Dict ):
        '''Verify that passing pre-encoded latents equals passing the raw image.'''
        lowercase : Optional[int] =self.get_dummy_components()
        lowercase : Union[str, Any] =StableDiffusionInstructPixaPixPipeline(**UpperCAmelCase__ )
        lowercase : Union[str, Any] =VaeImageProcessor(do_resize=UpperCAmelCase__ , do_normalize=UpperCAmelCase__ )
        lowercase : str =pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        lowercase : List[str] =pipe(**self.get_dummy_inputs_by_type(UpperCAmelCase__ , input_image_type='''pt''' ) )[0]
        lowercase : List[str] =components['''vae''']
        lowercase : str =self.get_dummy_inputs_by_type(UpperCAmelCase__ , input_image_type='''pt''' )
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                lowercase : List[str] =vae.encode(inputs[image_param] ).latent_dist.mode()
        lowercase : List[Any] =pipe(**UpperCAmelCase__ )[0]
        lowercase : Optional[int] =np.abs(out - out_latents_inputs ).max()
        self.assertLess(UpperCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow, GPU-only integration tests for ``StableDiffusionInstructPix2PixPipeline``
    against the ``timbrooks/instruct-pix2pix`` checkpoint.

    NOTE(review): identifiers here look machine-mangled — locals are bound to
    ``lowercase`` while later statements reference ``pipe``, ``inputs``,
    ``image_slice`` etc. — so these tests cannot run as written; the
    docstrings below describe the evident intent only.
    '''
    def lowerCamelCase_ ( self : str ):
        '''Per-test teardown: free Python and CUDA memory.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCamelCase_ ( self : Tuple , UpperCAmelCase__ : str=0 ):
        '''Build standard call kwargs around a hosted example image and a seeded generator.'''
        lowercase : Any =torch.manual_seed(UpperCAmelCase__ )
        lowercase : List[Any] =load_image(
            '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
        lowercase : Optional[int] ={
            '''prompt''': '''turn him into a cyborg''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''image_guidance_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def lowerCamelCase_ ( self : int ):
        '''Check default-scheduler output against a reference slice.'''
        lowercase : str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        pipe.enable_attention_slicing()
        lowercase : Optional[Any] =self.get_inputs()
        lowercase : Any =pipe(**UpperCAmelCase__ ).images
        lowercase : Union[str, Any] =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        lowercase : Any =np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def lowerCamelCase_ ( self : Optional[int] ):
        '''Check output with the LMS discrete scheduler against a reference slice.'''
        lowercase : Union[str, Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ )
        lowercase : int =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        pipe.enable_attention_slicing()
        lowercase : Tuple =self.get_inputs()
        lowercase : Optional[Any] =pipe(**UpperCAmelCase__ ).images
        lowercase : Any =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        lowercase : Any =np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def lowerCamelCase_ ( self : Any ):
        '''Check output with the DDIM scheduler against a reference slice.'''
        lowercase : Optional[Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ )
        lowercase : Dict =DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        pipe.enable_attention_slicing()
        lowercase : List[Any] =self.get_inputs()
        lowercase : Dict =pipe(**UpperCAmelCase__ ).images
        lowercase : List[str] =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        lowercase : Optional[int] =np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3
    def lowerCamelCase_ ( self : Union[str, Any] ):
        '''Verify the per-step callback fires and intermediate latents match references.'''
        lowercase : str =0
        def callback_fn(UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : torch.FloatTensor ) -> None:
            lowercase : str =True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                lowercase : Optional[int] =latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                lowercase : Any =latents[0, -3:, -3:, -1]
                lowercase : int =np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                lowercase : Union[str, Any] =latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                lowercase : Dict =latents[0, -3:, -3:, -1]
                lowercase : Any =np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
        lowercase : Union[str, Any] =False
        lowercase : Union[str, Any] =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa )
        lowercase : List[str] =pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        pipe.enable_attention_slicing()
        lowercase : Optional[int] =self.get_inputs()
        pipe(**UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def lowerCamelCase_ ( self : List[str] ):
        '''Verify sequential CPU offload keeps peak CUDA memory under ~2.2 GB.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowercase : Tuple =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=UpperCAmelCase__ , torch_dtype=torch.floataa )
        lowercase : List[str] =pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        lowercase : Dict =self.get_inputs()
        lowercase : List[Any] =pipe(**UpperCAmelCase__ )
        lowercase : List[str] =torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def lowerCamelCase_ ( self : Any ):
        '''Check a resolution divisible by 8 but not by 16/32 still produces correct output.'''
        lowercase : List[str] =self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        lowercase : Dict =inputs['''image'''].resize((504, 504) )
        lowercase : Any ='''timbrooks/instruct-pix2pix'''
        lowercase : str =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , )
        pipe.to(UpperCAmelCase__ )
        pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
        pipe.enable_attention_slicing()
        lowercase : Optional[Any] =pipe(**UpperCAmelCase__ )
        lowercase : Dict =output.images[0]
        lowercase : Tuple =image[255:258, 383:386, -1]
        assert image.shape == (504, 504, 3)
        lowercase : Optional[Any] =np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 92 |
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class lowercase_ ( unittest.TestCase ):
    """Helper that holds BridgeTower image-processor settings and computes the
    expected resized output dimensions for the tests below.

    NOTE(review): this class appears machine-mangled — the ``__init__``
    signature repeats the parameter name ``__lowerCamelCase`` (a SyntaxError)
    and the body binds every value to ``_SCREAMING_SNAKE_CASE`` instead of
    ``self.<attr>``, yet methods later read ``self.size`` etc. The intended
    names (parent, do_resize, size, size_divisor, ...) must be restored.
    """
    def __init__( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : bool = True , __lowerCamelCase : Dict[str, int] = None , __lowerCamelCase : int = 3_2 , __lowerCamelCase : bool = True , __lowerCamelCase : Union[int, float] = 1 / 2_5_5 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowerCamelCase : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowerCamelCase : bool = True , __lowerCamelCase : str=7 , __lowerCamelCase : Union[str, Any]=3_0 , __lowerCamelCase : Tuple=4_0_0 , __lowerCamelCase : List[Any]=3 , ):
        """Store the image-processor settings used to build the processor under test."""
        _SCREAMING_SNAKE_CASE = parent
        _SCREAMING_SNAKE_CASE = do_resize
        _SCREAMING_SNAKE_CASE = size if size is not None else {"shortest_edge": 2_8_8}
        _SCREAMING_SNAKE_CASE = size_divisor
        _SCREAMING_SNAKE_CASE = do_rescale
        _SCREAMING_SNAKE_CASE = rescale_factor
        _SCREAMING_SNAKE_CASE = do_normalize
        _SCREAMING_SNAKE_CASE = do_center_crop
        _SCREAMING_SNAKE_CASE = image_mean
        _SCREAMING_SNAKE_CASE = image_std
        _SCREAMING_SNAKE_CASE = do_pad
        _SCREAMING_SNAKE_CASE = batch_size
        _SCREAMING_SNAKE_CASE = num_channels
        _SCREAMING_SNAKE_CASE = min_resolution
        _SCREAMING_SNAKE_CASE = max_resolution
    def lowerCAmelCase_ ( self : Union[str, Any] ):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def lowerCAmelCase_ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : int=False ):
        """Compute the (height, width) the processor should emit: scale the
        shortest edge to ``size``, cap the longest edge at 1333/800 * size,
        then round both sides down to a multiple of ``size_divisor``.
        For batched inputs, return the per-axis maxima across the batch."""
        if not batched:
            _SCREAMING_SNAKE_CASE = self.size["shortest_edge"]
            _SCREAMING_SNAKE_CASE = image_inputs[0]
            if isinstance(__lowerCamelCase , Image.Image ):
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = image.size
            else:
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
            _SCREAMING_SNAKE_CASE = size / min(__lowerCamelCase , __lowerCamelCase )
            if h < w:
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = size, scale * w
            else:
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = scale * h, size
            _SCREAMING_SNAKE_CASE = int((1_3_3_3 / 8_0_0) * size )
            if max(__lowerCamelCase , __lowerCamelCase ) > max_size:
                _SCREAMING_SNAKE_CASE = max_size / max(__lowerCamelCase , __lowerCamelCase )
                _SCREAMING_SNAKE_CASE = newh * scale
                _SCREAMING_SNAKE_CASE = neww * scale
            _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = int(newh + 0.5 ), int(neww + 0.5 )
            _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            _SCREAMING_SNAKE_CASE = []
            for image in image_inputs:
                _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            _SCREAMING_SNAKE_CASE = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
            _SCREAMING_SNAKE_CASE = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowercase_ ( A , unittest.TestCase ):
    """Tests for ``BridgeTowerImageProcessor``: attribute presence plus PIL,
    numpy and torch inputs in single and batched form.

    NOTE(review): identifiers appear machine-mangled — locals are bound to
    ``_SCREAMING_SNAKE_CASE`` while assertions reference ``image_processing``,
    ``encoded_images`` etc., and the setUp-equivalent never assigns
    ``self.image_processor_tester``. The intended names must be restored
    before these tests can run.
    """
    lowerCamelCase_ = BridgeTowerImageProcessor if is_vision_available() else None
    def lowerCAmelCase_ ( self : Any ):
        """Per-test setup: build the settings helper (intended for ``self.image_processor_tester``)."""
        _SCREAMING_SNAKE_CASE = BridgeTowerImageProcessingTester(self )
    @property
    def lowerCAmelCase_ ( self : int ):
        """Kwargs dict used to instantiate the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCAmelCase_ ( self : Optional[int] ):
        """Check the processor exposes the expected configuration attributes."""
        _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "size_divisor" ) )
    def lowerCAmelCase_ ( self : Optional[Any] ):
        """Intentionally skipped in the original test suite."""
        pass
    def lowerCAmelCase_ ( self : Optional[int] ):
        """Process PIL images, single and batched, and check output tensor shapes."""
        # Initialize image processor
        _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , Image.Image )
        # Test not batched input
        _SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def lowerCAmelCase_ ( self : str ):
        """Process numpy arrays, single and batched, and check output tensor shapes."""
        # Initialize image processor
        _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , np.ndarray )
        # Test not batched input
        _SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def lowerCAmelCase_ ( self : str ):
        """Process torch tensors, single and batched, and check output tensor shapes."""
        # Initialize image processor
        _SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , torch.Tensor )
        # Test not batched input
        _SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        _SCREAMING_SNAKE_CASE = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
        _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 418 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __lowercase):
    """
    Constructs a ViLT processor which wraps a ViLT image processor and a BERT
    tokenizer into a single processor, so images and text can be prepared with
    one call.
    """

    # ProcessorMixin wires these up: the managed attributes and the classes
    # used to instantiate them from a pretrained checkpoint.
    # NOTE(review): the original assigned all three to the same mangled name
    # `snake_case__`, so only the last one survived — restored to the real names.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Fix: the original declared every parameter as `__a`, which is a
        # SyntaxError (duplicate argument names).
        feature_extractor = None
        # `feature_extractor` is the deprecated spelling of `image_processor`.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,  # fix: original passed a parameter as the warning category
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize `text` with the BERT tokenizer and preprocess `images` with the
        ViLT image processor, returning a single combined BatchEncoding.
        """
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Merge both sub-processors' input names, de-duplicated and order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 703 |
"""simple docstring"""
import os
def solution() -> int:
    """
    Project Euler 11: return the greatest product of four adjacent numbers
    (right, down, or either diagonal) in the 20x20 grid stored in grid.txt.

    Fixes: the original referenced the undefined name `UpperCamelCase__`
    (NameError) both for the directory and the int conversion, and the
    `__main__` guard called `solution()` while the function was named
    `snake_case`.
    """
    # grid.txt is expected to live next to this script.
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right: products of 4 horizontally adjacent cells
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down: products of 4 vertically adjacent cells
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal (down-left); j starts at 3 so j-3 stays in range
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
| 42 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the XGLM model family.
# Fix: the original assigned every optional-backend list to the same name
# `lowerCAmelCase` and then passed an undefined `_import_structure` to
# _LazyModule (NameError); it also never installed the lazy module into
# sys.modules, so the laziness had no effect.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

# Each optional backend contributes its symbols only when the dependency is
# importable; otherwise the corresponding key is simply absent.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the lazy module
    # below resolves attributes on first access.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
# Fix: all four module-level values were assigned to the single name
# `lowercase`, so each assignment shadowed the previous one and the metric
# class below referenced undefined `_CITATION`/`_DESCRIPTION`/
# `_KWARGS_DESCRIPTION`. Restored the conventional names.
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''

_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''

_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    """COMET machine-translation metric, wrapping the `unbabel-comet` package."""

    # Fix: the original named all three methods `a_` (so only the last one
    # survived on the class) and declared `_compute` with five parameters all
    # called `a__`, which is a SyntaxError. Restored the datasets.Metric
    # hook names and distinct parameter names.
    def _info(self):
        # Metadata and feature schema consumed by the `datasets` library.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # Download the requested COMET checkpoint ("default" maps to wmt20-comet-da).
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score each (source, hypothesis, reference) triple; return mean and per-sample scores."""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        # Transpose the column dict into a list of per-sample row dicts.
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 211 | 0 |
"""
Project Euler problem 77 (variant): find the first value that can be written
as a product of primes (via prime partitions) in more than a given number of
unique ways.

Fixes over the original: the sieve results were all assigned to the single
name `UpperCAmelCase` (so `primes` was undefined, a NameError at import), the
recursive call targeted an undefined `partition`, and both functions shared
the name `__lowerCAmelCase` so the second shadowed the first.
"""
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

# Sieve of Eratosthenes restricted to odd numbers below NUM_PRIMES (plus 2).
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return the set of distinct products of primes over all prime partitions
    of `number_to_partition` ({1} for 0, the empty set for negatives).
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        # Extend every partition of the remainder by this prime factor.
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """
    Return the smallest integer below NUM_PRIMES whose prime partitions yield
    more than `number_unique_partitions` distinct products, or None if no such
    integer exists in that range.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f'''{solution() = }''')
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
# Canonical checkpoint / config file names.
# Fix: every constant was assigned to the single name `UpperCAmelCase`
# (each line shadowing the previous), and `FEATURE_EXTRACTOR_NAME` was
# referenced while undefined. Restored the conventional constant names.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version: str) -> None:
    """
    Raise ImportError if the installed library version is older than `min_version`.

    Fixes over the original: it compared `version.parse(SCREAMING_SNAKE_CASE)`
    against itself (always False, so it never raised); the body referenced the
    undefined name `min_version` while the parameter was `SCREAMING_SNAKE_CASE`;
    and the parenthesized message ended with a trailing comma, creating a tuple
    that broke the subsequent string `+=`.
    """
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.