| code (string, length 82-54.1k) | code_codestyle (int64, 0-699) | style_context (string, length 111-35.6k) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: deque-based shortest path for graphs whose edges weigh 0 or 1."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # zero-weight edges go to the front of the deque, one-weight to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 80 |
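A minimal usage sketch for the restored 0-1 BFS class above; the four-vertex graph is invented purely for illustration:

# Hypothetical example graph with mixed 0/1 edge weights.
graph = AdjacencyList(4)
graph.add_edge(0, 1, 0)
graph.add_edge(1, 2, 1)
graph.add_edge(0, 3, 1)
graph.add_edge(3, 2, 0)
# Both 0 -> 1 -> 2 and 0 -> 3 -> 2 cost 1, so the shortest distance is 1.
print(graph.get_shortest_path(0, 2))  # 1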
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 90 | 0 |
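The TPU branch in `collate_fn` matters because XLA recompiles the step for every new tensor shape, so static padding keeps one compiled graph. A small sketch of the difference (`features` stands for any list of tokenized examples):

# With padding="longest" the batch shape tracks the longest sample and varies batch to batch;
# with padding="max_length" + max_length=128 every batch is (batch_size, 128).
batch_dynamic = tokenizer.pad(features, padding="longest", return_tensors="pt")
batch_static = tokenizer.pad(features, padding="max_length", max_length=128, return_tensors="pt")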
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 81 |
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 90 | 0 |
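For reference, `require_version` takes a pip-style requirement string and raises if the installed package does not satisfy it; a sketch of how the helper above would be called (the requirement strings and hint are illustrative, the real ones live in `dependency_versions_table.deps`):

dep_version_check("tokenizers")  # checks something like "tokenizers>=0.11.1"
dep_version_check("numpy", "numpy is needed to run this script")  # custom hint shown on failure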
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCamelCase = numpy.array([0, 0])
lowerCamelCase = numpy.array([0.5, 0.8_660_254])
lowerCamelCase = numpy.array([1, 0])
lowerCamelCase = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = initial_vectors
for _ in range(lowerCAmelCase__ ):
UpperCAmelCase_ = iteration_step(lowerCAmelCase__ )
return vectors
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
for i, start_vector in enumerate(vectors[:-1] ):
UpperCAmelCase_ = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
UpperCAmelCase_ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = numpy.radians(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
UpperCAmelCase_ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
UpperCAmelCase_ , UpperCAmelCase_ = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 82 |
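Each iteration replaces every segment with four shorter ones, so the 3 segments of the initial triangle become 3 * 4**n segments (hence 3 * 4**n + 1 points) after n steps. A quick sanity check against the restored function names above:

for n in range(4):
    assert len(iterate(INITIAL_VECTORS, n)) == 3 * 4**n + 1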
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    BertTokenizer,
    ViltConfig,
    ViltForImageAndTextRetrieval,
    ViltForImagesAndTextClassification,
    ViltForMaskedLM,
    ViltForQuestionAnswering,
    ViltImageProcessor,
    ViltProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )

    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )

    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass

    return rename_keys


def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define configuration and initialize HuggingFace model
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 90 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class __snake_case ( _lowercase):
snake_case__ : Optional[Any] = "camembert"
def __init__( self : Optional[Any] , __lowerCAmelCase : Any=3_0_5_2_2 , __lowerCAmelCase : List[str]=7_6_8 , __lowerCAmelCase : List[str]=1_2 , __lowerCAmelCase : Optional[int]=1_2 , __lowerCAmelCase : List[Any]=3_0_7_2 , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[int]=5_1_2 , __lowerCAmelCase : str=2 , __lowerCAmelCase : int=0.02 , __lowerCAmelCase : List[Any]=1E-12 , __lowerCAmelCase : Union[str, Any]=1 , __lowerCAmelCase : Optional[Any]=0 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : str="absolute" , __lowerCAmelCase : Any=True , __lowerCAmelCase : Optional[int]=None , **__lowerCAmelCase : Optional[int] , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : str = hidden_size
_lowerCamelCase : Union[str, Any] = num_hidden_layers
_lowerCamelCase : Any = num_attention_heads
_lowerCamelCase : Optional[Any] = hidden_act
_lowerCamelCase : List[str] = intermediate_size
_lowerCamelCase : Optional[Any] = hidden_dropout_prob
_lowerCamelCase : List[Any] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : Tuple = type_vocab_size
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Dict = layer_norm_eps
_lowerCamelCase : List[Any] = position_embedding_type
_lowerCamelCase : int = use_cache
_lowerCamelCase : List[str] = classifier_dropout
class __snake_case ( _lowercase):
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCamelCase : Optional[Any] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_lowerCamelCase : Union[str, Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 83 |
import re


def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
| 90 | 0 |
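A few illustrative inputs against the pattern above (the numbers are made up):

assert indian_phone_validator("+91 9876543210")      # country code plus a space
assert indian_phone_validator("09876543210")         # leading zero
assert not indian_phone_validator("+911234567890")   # mobile numbers must start with 7, 8 or 9
assert not indian_phone_validator("98765")           # too short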
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
| 84 |
import os
from typing import List, Optional, Union

from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
        "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}


def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class EsmTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
| 90 | 0 |
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references this node holds."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Randomly select a new node level, capped by max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        # Nodes with keys directly preceding the searched key, one per level.
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 85 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 90 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 86 |
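The annotation update above relies on YOLO-format boxes with normalized centers in [0, 1]: a horizontal flip maps x_center to 1 - x_center (class, width, and height are unchanged), and a vertical flip does the same to y_center. A worked example with a made-up box:

# YOLO box layout: [class, x_center, y_center, width, height], all coordinates normalized.
box = [0, 0.25, 0.60, 0.10, 0.20]
h_flipped = [box[0], 1 - box[1], box[2], box[3], box[4]]  # -> [0, 0.75, 0.60, 0.10, 0.20]
v_flipped = [box[0], box[1], 1 - box[2], box[3], box[4]]  # -> [0, 0.25, 0.40, 0.10, 0.20]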
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Returns the smallest positive number evenly divisible by all numbers 1..n (Project Euler 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
| 90 | 0 |
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 87 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 90 | 0 |
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative ( img ):
    """simple docstring"""
    rows , cols = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(rows ):
        for j in range(cols ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
# read original image
UpperCAmelCase = imread("""image_data/lena.jpg""", 1)
# convert to its negative
UpperCAmelCase = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
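# Self-contained sketch (an assumption, not part of the original): NumPy
# broadcasting negates every channel at once, matching the per-pixel loop above.
import numpy as np
_demo = np.zeros((2, 2, 3), dtype=np.uint8)
assert ((255 - _demo) == 255).all()  # black pixels become white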
| 88 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase__ = Encoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , )
lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        lowerCAmelCase__ = nn.Conv2d(lowerCamelCase_ , lowerCamelCase_ , 1 )
lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ )
        lowerCAmelCase__ = nn.Conv2d(lowerCamelCase_ , lowerCamelCase_ , 1 )
# pass init params to Decoder
lowerCAmelCase__ = Decoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput:
lowerCAmelCase__ = self.encoder(lowerCamelCase_ )
lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCamelCase_ )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ )
else:
lowerCAmelCase__ = h
lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ )
lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase__ = sample
lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents
lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ ) | 90 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class _lowerCamelCase:
lowercase_ : Tuple = MBartConfig
lowercase_ : Optional[Any] = {}
lowercase_ : List[Any] = """gelu"""
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=99, lowerCamelCase=32, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase=37, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=20, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=0, ) -> Optional[int]:
"""simple docstring"""
_lowercase : str = parent
_lowercase : str = batch_size
_lowercase : Union[str, Any] = seq_length
_lowercase : Dict = is_training
_lowercase : str = use_labels
_lowercase : List[Any] = vocab_size
_lowercase : Dict = hidden_size
_lowercase : Tuple = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Tuple = hidden_dropout_prob
_lowercase : int = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Optional[Any] = eos_token_id
_lowercase : Optional[int] = pad_token_id
_lowercase : Any = bos_token_id
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
_lowercase : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
_lowercase : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1)
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : Dict = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
_lowercase : str = prepare_mbart_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase)
return config, inputs_dict
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : str = TFMBartModel(config=lowerCamelCase).get_decoder()
_lowercase : Union[str, Any] = inputs_dict['input_ids']
_lowercase : str = input_ids[:1, :]
_lowercase : Optional[int] = inputs_dict['attention_mask'][:1, :]
_lowercase : Tuple = inputs_dict['head_mask']
_lowercase : Tuple = 1
# first forward pass
_lowercase : Union[str, Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase)
_lowercase , _lowercase : Union[str, Any] = outputs.to_tuple()
_lowercase : Optional[int] = past_key_values[1]
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , ) -> Optional[Any]:
if attention_mask is None:
_lowercase : Dict = tf.cast(tf.math.not_equal(lowerCamelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowercase : List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowercase : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
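# Hedged sketch (an assumption, not part of the original test): the default
# attention mask built above marks non-pad positions with 1, e.g. with
# pad_token_id = 1, ids [[5, 7, 1]] yield the mask [[1, 1, 0]].
if is_tf_available():
    _ids = tf.constant([[5, 7, 1]])
    assert tf.cast(tf.math.not_equal(_ids, 1), tf.int8).numpy().tolist() == [[1, 1, 0]]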
@require_tf
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Optional[int] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowercase_ : Optional[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowercase_ : Optional[int] = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase_ : Optional[Any] = True
lowercase_ : Union[str, Any] = False
lowercase_ : Dict = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[Any]:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[str] = TFMBartModelTester(self)
_lowercase : List[Any] = ConfigTester(self, config_class=lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCamelCase( unittest.TestCase ):
lowercase_ : Tuple = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
lowercase_ : int = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
lowercase_ : Optional[Any] = """facebook/mbart-large-en-ro"""
@cached_property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def UpperCamelCase ( self, **lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : str = self.translate_src_text(**lowerCamelCase)
self.assertListEqual(self.expected_text, lowerCamelCase)
def UpperCamelCase ( self, **lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : List[Any] = self.tokenizer(self.src_text, **lowerCamelCase, return_tensors='tf')
_lowercase : int = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
_lowercase : List[Any] = self.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase)
return generated_words
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 89 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe ( grid , row , column , n ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
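# Hedged sketch (an assumption, not part of the original): the box-origin
# arithmetic used in ``is_safe`` maps cell (4, 7) to the 3x3 box whose
# top-left corner is (3, 6).
assert (4 - 4 % 3, 7 - 7 % 3) == (3, 6)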
def find_empty_location ( grid ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku ( grid ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row , column = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
return grid
            grid[row][column] = 0
return None
def print_solution ( grid ) -> None:
for row in grid:
for cell in row:
            print(cell , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
__UpperCAmelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : List[str] ,*A_ : Optional[int] ,**A_ : int ) -> None:
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' ,A_ ,)
super().__init__(*A_ ,**A_ ) | 91 |
'''simple docstring'''
def fibonacci ( n ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index ( n ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
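# Self-contained sanity sketch (an assumption, mirroring the logic above):
# walking the sequence until it reaches 3 digits should stop at index 12,
# since F(12) = 144 is the first 3-digit Fibonacci number.
_a, _b, _idx = 0, 1, 1
while len(str(_b)) < 3:
    _a, _b = _b, _a + _b
    _idx += 1
assert (_idx, _b) == (12, 144)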
def solution ( n = 1000 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> Dict:
lowercase : Dict =SwinvaConfig()
lowercase : str =swinva_name.split('''_''' )
lowercase : Dict =name_split[1]
if "to" in name_split[3]:
lowercase : Optional[Any] =int(name_split[3][-3:] )
else:
lowercase : Tuple =int(name_split[3] )
if "to" in name_split[2]:
lowercase : Optional[int] =int(name_split[2][-2:] )
else:
lowercase : Union[str, Any] =int(name_split[2][6:] )
if model_size == "tiny":
lowercase : Tuple =96
lowercase : Any =(2, 2, 6, 2)
lowercase : Union[str, Any] =(3, 6, 12, 24)
elif model_size == "small":
lowercase : List[str] =96
lowercase : Optional[Any] =(2, 2, 18, 2)
lowercase : Optional[Any] =(3, 6, 12, 24)
elif model_size == "base":
lowercase : str =128
lowercase : Dict =(2, 2, 18, 2)
lowercase : Optional[Any] =(4, 8, 16, 32)
else:
lowercase : Optional[int] =192
lowercase : Dict =(2, 2, 18, 2)
lowercase : Any =(6, 12, 24, 48)
if "to" in swinva_name:
lowercase : Any =(12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
lowercase : Optional[int] =21841
lowercase : List[str] ='''huggingface/label-files'''
lowercase : int ='''imagenet-22k-id2label.json'''
lowercase : int =json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase : Any ={int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Dict =idalabel
lowercase : Dict ={v: k for k, v in idalabel.items()}
else:
lowercase : Dict =1000
lowercase : Optional[Any] ='''huggingface/label-files'''
lowercase : Optional[Any] ='''imagenet-1k-id2label.json'''
lowercase : str =json.load(open(hf_hub_download(__magic_name__ , __magic_name__ , repo_type='''dataset''' ) , '''r''' ) )
lowercase : int ={int(__magic_name__ ): v for k, v in idalabel.items()}
lowercase : Any =idalabel
lowercase : Dict ={v: k for k, v in idalabel.items()}
lowercase : Union[str, Any] =img_size
lowercase : List[Any] =num_classes
lowercase : str =embed_dim
lowercase : int =depths
lowercase : Optional[Any] =num_heads
lowercase : List[str] =window_size
return config
def _lowerCAmelCase ( __magic_name__ : List[Any] ) -> List[Any]:
if "patch_embed.proj" in name:
lowercase : List[Any] =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase : Union[str, Any] =name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase : Optional[Any] ='''encoder.''' + name
if "attn.proj" in name:
lowercase : List[str] =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase : Optional[Any] =name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase : Any =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase : str =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase : Any =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase : int =name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
lowercase : Union[str, Any] =name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
lowercase : Optional[int] =name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
lowercase : Optional[int] =name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
lowercase : Any =name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if name == "norm.weight":
lowercase : Tuple ='''layernorm.weight'''
if name == "norm.bias":
lowercase : int ='''layernorm.bias'''
if "head" in name:
lowercase : Any =name.replace('''head''' , '''classifier''' )
else:
lowercase : int ='''swinv2.''' + name
return name
def _lowerCAmelCase ( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
lowercase : int =orig_state_dict.pop(__magic_name__ )
if "mask" in key:
continue
elif "qkv" in key:
lowercase : Optional[int] =key.split('''.''' )
lowercase : Optional[int] =int(key_split[1] )
lowercase : List[str] =int(key_split[3] )
lowercase : Union[str, Any] =model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase : Any =val[:dim, :]
lowercase : Any =val[dim : dim * 2, :]
lowercase : Any =val[-dim:, :]
else:
lowercase : List[str] =val[:dim]
lowercase : Any =val[
dim : dim * 2
]
lowercase : str =val[-dim:]
else:
lowercase : Tuple =val
return orig_state_dict
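# Self-contained sketch (an assumption, not part of the original script): the
# fused-QKV split above cuts a (3*dim, dim) weight into equal query/key/value
# thirds along dimension 0.
_dim = 4
_qkv = torch.arange(3 * _dim * _dim, dtype=torch.float32).reshape(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert torch.equal(torch.cat([_q, _k, _v]), _qkv)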
def _lowerCAmelCase ( __magic_name__ : str , __magic_name__ : str ) -> List[str]:
lowercase : Tuple =timm.create_model(__magic_name__ , pretrained=__magic_name__ )
timm_model.eval()
lowercase : int =get_swinva_config(__magic_name__ )
lowercase : List[Any] =SwinvaForImageClassification(__magic_name__ )
model.eval()
lowercase : int =convert_state_dict(timm_model.state_dict() , __magic_name__ )
model.load_state_dict(__magic_name__ )
lowercase : Tuple ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''' , '''-''' ) ) )
lowercase : Tuple =Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw )
lowercase : str =image_processor(images=__magic_name__ , return_tensors='''pt''' )
lowercase : Any =timm_model(inputs['''pixel_values'''] )
lowercase : Optional[int] =model(**__magic_name__ ).logits
assert torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__magic_name__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__magic_name__ )
model.push_to_hub(
repo_path_or_name=Path(__magic_name__ , __magic_name__ ) , organization='''nandwalritik''' , commit_message='''Add model''' , )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase_ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 92 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot ( lst ) -> int:
    return choice(lst )
def kth_number ( lst , k ) -> int:
    pivot = random_pivot(lst )
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small ) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
    # pivot is in elements smaller than k
    else:
        return kth_number(small , k )
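# Hedged sanity sketch (an assumption, not part of the original): for distinct
# values and 1-based k, quick-select must agree with plain sorting.
assert kth_number([7, 10, 4, 3, 20, 15] , 3 ) == sorted([7, 10, 4, 3, 20, 15] )[2]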
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 0 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__A = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
__A = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def __A (_SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :Tuple = list(state_dict.keys() )
for name in state_dict_keys:
lowerCAmelCase__ :List[str] = state_dict.pop(_SCREAMING_SNAKE_CASE )
# emb -> embedding
if name.startswith('emb.' ):
lowerCAmelCase__ :Dict = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
lowerCAmelCase__ :Optional[int] = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
lowerCAmelCase__ :Tuple = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , _SCREAMING_SNAKE_CASE )
# ffn -> feed_forward
lowerCAmelCase__ :List[Any] = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , _SCREAMING_SNAKE_CASE )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
lowerCAmelCase__ :Union[str, Any] = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
lowerCAmelCase__ :str = name.replace('.time_mix_v' , '.time_mix_value' )
    # time_mix_r -> time_mix_receptance and reshape
if name.endswith('.time_mix_r' ):
lowerCAmelCase__ :List[str] = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
lowerCAmelCase__ :Union[str, Any] = 'rwkv.' + name
lowerCAmelCase__ :Tuple = weight
return state_dict
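# Self-contained sketch (an assumption, not part of the original script) of the
# block-renaming rule applied above:
assert re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", "blocks.3.att.key.weight") == (
    "blocks.3.attention.key.weight"
)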
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ) ->Optional[int]:
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
lowerCAmelCase__ :Optional[int] = 5_0277
lowerCAmelCase__ :Union[str, Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
lowerCAmelCase__ :Union[str, Any] = PreTrainedTokenizerFast(tokenizer_file=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Optional[int] = len(_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
# 2. Build the config
lowerCAmelCase__ :Any = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
lowerCAmelCase__ :Any = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
lowerCAmelCase__ :Dict = RwkvConfig(
vocab_size=_SCREAMING_SNAKE_CASE , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_SCREAMING_SNAKE_CASE )
# 3. Download model file then convert state_dict
lowerCAmelCase__ :Union[str, Any] = hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
lowerCAmelCase__ :Dict = convert_state_dict(_SCREAMING_SNAKE_CASE )
# 4. Split in shards and save
lowerCAmelCase__ , lowerCAmelCase__ :Any = shard_checkpoint(_SCREAMING_SNAKE_CASE )
for shard_file, shard in shards.items():
torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if index is not None:
lowerCAmelCase__ :str = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save the index as well
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
lowerCAmelCase__ :Any = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + '\n'
f.write(_SCREAMING_SNAKE_CASE )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
lowerCAmelCase__ :Dict = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
lowerCAmelCase__ :Union[str, Any] = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
lowerCAmelCase__ :Optional[Any] = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE , max_shard_size='2GB' )
tokenizer.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__A = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 93 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('''KEY''')
__UpperCAmelCase = TypeVar('''VAL''')
@dataclass(frozen=a__ , slots=a__ )
class a__ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowercase__ : KEY
lowercase__ : VAL
class a__ ( _Item ):
'''simple docstring'''
def __init__( self ) -> None:
        super().__init__(None , None )
def __bool__( self ) -> bool:
return False
__UpperCAmelCase = _DeletedItem()
class a__ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None:
lowerCAmelCase__ = initial_block_size
lowerCAmelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase__ = capacity_factor
lowerCAmelCase__ = 0
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return hash(lowerCamelCase_ ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
lowerCAmelCase__ = self._buckets[ind]
if not stored:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
        limit = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = self._buckets
lowerCAmelCase__ = [None] * new_size
lowerCAmelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]:
lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
break
def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_ , lowerCamelCase_ )
def __delitem__( self , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase__ = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , lowerCamelCase_ ) -> VAL:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
        val_string = ''' ,'''.join(
F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})""" | 90 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class UpperCamelCase_ (__A ):
__magic_name__ = '''vit'''
def __init__( self : int , lowerCAmelCase_ : Any=768 , lowerCAmelCase_ : str=12 , lowerCAmelCase_ : Optional[int]=12 , lowerCAmelCase_ : List[str]=3_072 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Optional[int]=0.0_2 , lowerCAmelCase_ : Optional[Any]=1e-12 , lowerCAmelCase_ : Any=224 , lowerCAmelCase_ : str=16 , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=16 , **lowerCAmelCase_ : Union[str, Any] , ) -> Dict:
super().__init__(**lowerCAmelCase_ )
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : Optional[Any] = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : Dict = initializer_range
UpperCAmelCase_ : Tuple = layer_norm_eps
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : Union[str, Any] = patch_size
UpperCAmelCase_ : Tuple = num_channels
UpperCAmelCase_ : Optional[int] = qkv_bias
UpperCAmelCase_ : Tuple = encoder_stride
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> float:
return 1e-4
| 95 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _snake_case ( A , A , A ) -> Union[str, Any]:
lowerCAmelCase__ = OmegaConf.load(A )
lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model''']
lowerCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''first_stage_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''model.diffusion_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
lowerCAmelCase__ = config.model.params.first_stage_config.params
lowerCAmelCase__ = config.model.params.unet_config.params
lowerCAmelCase__ = VQModel(**A ).eval()
vqvae.load_state_dict(A )
lowerCAmelCase__ = UNetLDMModel(**A ).eval()
unet.load_state_dict(A )
lowerCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , )
lowerCAmelCase__ = LDMPipeline(A , A , A )
pipeline.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
__UpperCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 90 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
class __A :
def __init__( self : Optional[Any] , __snake_case : int ) -> None:
__magic_name__: Optional[int] = size
# approximate the overall size of segment tree with given value
__magic_name__: str = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
__magic_name__: Dict = [0 for i in range(0 , 4 * size )]
__magic_name__: Optional[int] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def lowerCamelCase__ ( self : str , __snake_case : int ) -> int:
return idx * 2
def lowerCamelCase__ ( self : Dict , __snake_case : int ) -> int:
return idx * 2 + 1
def lowerCamelCase__ ( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : list[int] ) -> None:
if left_element == right_element:
__magic_name__: List[Any] = a[left_element - 1]
else:
__magic_name__: Any = (left_element + right_element) // 2
self.build(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case )
self.build(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case )
__magic_name__: List[str] = max(
self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] )
def lowerCamelCase__ ( self : Tuple , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool:
if self.flag[idx] is True:
__magic_name__: int = self.lazy[idx]
__magic_name__: Any = False
if left_element != right_element:
__magic_name__: Optional[Any] = self.lazy[idx]
__magic_name__: Dict = self.lazy[idx]
__magic_name__: Dict = True
__magic_name__: Optional[int] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
__magic_name__: Optional[Any] = val
if left_element != right_element:
__magic_name__: Dict = val
__magic_name__: Dict = val
__magic_name__: Union[str, Any] = True
__magic_name__: List[Any] = True
return True
__magic_name__: int = (left_element + right_element) // 2
self.update(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
self.update(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case , __snake_case )
__magic_name__: List[str] = max(
self.segment_tree[self.left(__snake_case )] , self.segment_tree[self.right(__snake_case )] )
return True
def lowerCamelCase__ ( self : Optional[int] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int ) -> int | float:
if self.flag[idx] is True:
__magic_name__: List[str] = self.lazy[idx]
__magic_name__: str = False
if left_element != right_element:
__magic_name__: int = self.lazy[idx]
__magic_name__: Dict = self.lazy[idx]
__magic_name__: List[str] = True
__magic_name__: List[Any] = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
__magic_name__: Optional[Any] = (left_element + right_element) // 2
__magic_name__: Union[str, Any] = self.query(self.left(__snake_case ) , __snake_case , __snake_case , __snake_case , __snake_case )
__magic_name__: List[str] = self.query(self.right(__snake_case ) , mid + 1 , __snake_case , __snake_case , __snake_case )
return max(__snake_case , __snake_case )
def __str__( self : Dict ) -> str:
return str([self.query(1 , 1 , self.size , __snake_case , __snake_case ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
__lowerCamelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
__lowerCamelCase = 15
__lowerCamelCase = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt)
| 96 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
return d | 90 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :List[str] = ['image_processor']
a :Tuple = 'SamImageProcessor'
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.image_processor
lowercase_ = -1_0
lowercase_ = self.image_processor.size['''longest_edge''']
def __call__( self : Any , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> BatchEncoding:
lowercase_ = self.image_processor(
SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # pop arguments that are not used in the forward but used nevertheless
lowercase_ = encoding_image_processor['''original_sizes''']
if hasattr(SCREAMING_SNAKE_CASE_ , '''numpy''' ): # Checks if Torch or TF tensor
lowercase_ = original_sizes.numpy()
lowercase_ , lowercase_ , lowercase_ = self._check_and_preprocess_points(
input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , )
lowercase_ = self._normalize_and_convert(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , input_points=SCREAMING_SNAKE_CASE_ , input_labels=SCREAMING_SNAKE_CASE_ , input_boxes=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , )
return encoding_image_processor
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str=None , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : List[str]="pt" , ) -> Dict:
if input_points is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
lowercase_ = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] ) for point in input_points
]
else:
lowercase_ = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for point, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowercase_ , lowercase_ = self._pad_points_and_labels(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = np.array(SCREAMING_SNAKE_CASE_ )
if input_labels is not None:
lowercase_ = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
lowercase_ = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , original_sizes[0] , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box in input_boxes
]
else:
lowercase_ = [
self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , is_bounding_box=SCREAMING_SNAKE_CASE_ )
for box, original_size in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
]
lowercase_ = np.array(SCREAMING_SNAKE_CASE_ )
if input_boxes is not None:
if return_tensors == "pt":
lowercase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
lowercase_ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowercase_ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# boxes batch size of 1 by default
lowercase_ = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowercase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
lowercase_ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowercase_ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
lowercase_ = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowercase_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
lowercase_ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowercase_ = tf.convert_to_tensor(SCREAMING_SNAKE_CASE_ )
# point batch size of 1 by default
lowercase_ = tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict ) -> Union[str, Any]:
lowercase_ = max([point.shape[0] for point in input_points] )
lowercase_ = []
for i, point in enumerate(SCREAMING_SNAKE_CASE_ ):
if point.shape[0] != expected_nb_points:
lowercase_ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowercase_ = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(SCREAMING_SNAKE_CASE_ )
lowercase_ = processed_input_points
return input_points, input_labels
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict=False ) -> np.ndarray:
lowercase_ , lowercase_ = original_size
lowercase_ , lowercase_ = self.image_processor._get_preprocess_shape(SCREAMING_SNAKE_CASE_ , longest_edge=SCREAMING_SNAKE_CASE_ )
lowercase_ = deepcopy(SCREAMING_SNAKE_CASE_ ).astype(SCREAMING_SNAKE_CASE_ )
if is_bounding_box:
lowercase_ = coords.reshape(-1 , 2 , 2 )
lowercase_ = coords[..., 0] * (new_w / old_w)
lowercase_ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowercase_ = coords.reshape(-1 , 4 )
return coords
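    # Hedged note (an assumption, not part of the original): with an original
    # size of (600, 800) and longest_edge = 1024, both axes scale by 1024/800,
    # so a point (400, 300) maps to (512, 384).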
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , ) -> List[str]:
if input_points is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , '''numpy''' ): # Checks for TF or Torch tensor
lowercase_ = input_points.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_points[0] , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''Input points must be a list of list of floating points.''' )
lowercase_ = [np.array(SCREAMING_SNAKE_CASE_ ) for input_point in input_points]
else:
lowercase_ = None
if input_labels is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , '''numpy''' ):
lowercase_ = input_labels.numpy().tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(input_labels[0] , SCREAMING_SNAKE_CASE_ ):
raise ValueError('''Input labels must be a list of list integers.''' )
lowercase_ = [np.array(SCREAMING_SNAKE_CASE_ ) for label in input_labels]
else:
lowercase_ = None
if input_boxes is not None:
if hasattr(SCREAMING_SNAKE_CASE_ , '''numpy''' ):
lowercase_ = input_boxes.numpy().tolist()
if (
not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0] , SCREAMING_SNAKE_CASE_ )
or not isinstance(input_boxes[0][0] , SCREAMING_SNAKE_CASE_ )
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
            lowercase_ = [np.array(SCREAMING_SNAKE_CASE_ ).astype(np.float32 ) for box in input_boxes]
else:
lowercase_ = None
return input_points, input_labels, input_boxes
@property
def _lowercase ( self : int ) -> List[Any]:
lowercase_ = self.image_processor.model_input_names
return list(dict.fromkeys(SCREAMING_SNAKE_CASE_ ) )
def _lowercase ( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : str , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 97 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class a__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
            lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''cyberpunk 2077'''
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 90 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings, so that
    every byte has a visible, reversible stand-in the BPE can operate on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
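# Illustration of the mapping (a sketch): printable bytes map to themselves, while
# bytes like the space (0x20) are shifted into unused code points so every byte
# gets a visible, reversible stand-in.
# >>> mapping = bytes_to_unicode()
# >>> mapping[ord("A")], mapping[ord(" ")]
# ('A', 'Ġ')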
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
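# Example (illustrative): for the symbol tuple ("h", "e", "l", "l", "o"),
# get_pairs returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")} - the
# candidate bigrams the BPE loop below picks merges from.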
class __lowerCAmelCase ( PreTrainedTokenizer ):
"""simple docstring"""
_snake_case : Tuple = VOCAB_FILES_NAMES
_snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_snake_case : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case : List[Any] = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ) -> None:
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab(self) -> dict:
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self , token ) -> str:
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
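    # Worked example (illustrative): if self.bpe_ranks ranks ("l", "o") before
    # ("lo", "w"), then bpe("low") rewrites ("l", "o", "w") -> ("lo", "w") ->
    # ("low",), always applying the lowest-ranked (most frequent) merge first.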
    def _tokenize(self , text ) -> List[str]:
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id(self , token ) -> int:
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token(self , index ) -> str:
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string(self , tokens ) -> str:
        '''simple docstring'''
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization(self , text , is_split_into_words=False , **kwargs ) -> tuple:
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 98 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if n uses each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")
def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
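# Why 100002 and 1002003: concatenating a 4-digit n with 2*n (a 5-digit number)
# gives n * 10**5 + 2*n = n * 100002, and concatenating a 3-digit n with 2*n and
# 3*n gives n * 10**6 + 2*n * 10**3 + 3*n = n * 1002003. For example
# 192 * 1002003 = 192384576, the pandigital formed by 192, 384, 576.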
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 0 |
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
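# The try/except blocks above implement soft-dependency gating: when an optional
# extra is missing, the real classes are replaced by dummy objects that fail only
# when used. A minimal sketch of such a dummy (illustrative; the real ones are
# generated into diffusers.utils.dummy_* modules, so treat the exact shape as an
# assumption):
# class DPMSolverSDEScheduler:
#     def __init__(self, *args, **kwargs):
#         raise ImportError("DPMSolverSDEScheduler requires the `torchsde` extra: pip install torchsde")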
| 99 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 90 | 0 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str) -> None:
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)
print(f'''Loading model based on config from {config_path}...''' )
SCREAMING_SNAKE_CASE__ = BertConfig.from_json_file(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = BertForMaskedLM(lowerCAmelCase_ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ = model.bert.encoder.layer[layer_index]
# Self-attention
SCREAMING_SNAKE_CASE__ = layer.attention.self
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
lowerCAmelCase_ , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
lowerCAmelCase_ , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
lowerCAmelCase_ , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
lowerCAmelCase_ , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
lowerCAmelCase_ , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
lowerCAmelCase_ , '''_value_dense/bias''' , self_attn.value.bias.data.shape )
# Self-attention Output
SCREAMING_SNAKE_CASE__ = layer.attention.output
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
lowerCAmelCase_ , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
lowerCAmelCase_ , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(lowerCAmelCase_ , '''_attention_layer_norm/gamma''' )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(lowerCAmelCase_ , '''_attention_layer_norm/beta''' )
# Intermediate
SCREAMING_SNAKE_CASE__ = layer.intermediate
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(lowerCAmelCase_ , '''_intermediate_dense/kernel''' )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(lowerCAmelCase_ , '''_intermediate_dense/bias''' )
# Output
SCREAMING_SNAKE_CASE__ = layer.output
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(lowerCAmelCase_ , '''_output_dense/kernel''' )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(lowerCAmelCase_ , '''_output_dense/bias''' )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(lowerCAmelCase_ , '''_output_layer_norm/gamma''' )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(lowerCAmelCase_ , '''_output_layer_norm/beta''' )
# Embeddings
SCREAMING_SNAKE_CASE__ = get_encoder_array('''_position_embedding_layer/embeddings''' )
SCREAMING_SNAKE_CASE__ = get_encoder_array('''_type_embedding_layer/embeddings''' )
SCREAMING_SNAKE_CASE__ = get_encoder_array('''_embedding_norm_layer/gamma''' )
SCREAMING_SNAKE_CASE__ = get_encoder_array('''_embedding_norm_layer/beta''' )
# LM Head
SCREAMING_SNAKE_CASE__ = model.cls.predictions.transform
SCREAMING_SNAKE_CASE__ = get_masked_lm_array('''dense/kernel''' )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array('''dense/bias''' )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array('''layer_norm/gamma''' )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array('''layer_norm/beta''' )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array('''embedding_table''' )
# Pooling
SCREAMING_SNAKE_CASE__ = BertPooler(config=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = get_encoder_array('''_pooler_layer/kernel''' )
SCREAMING_SNAKE_CASE__ = get_encoder_array('''_pooler_layer/bias''' )
# Export final model
model.save_pretrained(lowerCAmelCase_ )
# Integration test - should load without any errors ;)
SCREAMING_SNAKE_CASE__ = BertForMaskedLM.from_pretrained(lowerCAmelCase_ )
print(new_model.eval() )
print('''Model conversion was done sucessfully!''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
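    # Example invocation (the script name and paths below are placeholders):
    # python convert_token_dropping_checkpoint.py \
    #     --tf_checkpoint_path /path/to/tf_checkpoint \
    #     --bert_config_file /path/to/bert_config.json \
    #     --pytorch_dump_path /path/to/output_dir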
| 100 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory() -> None:
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    '''simple docstring'''
    def __init__(self) -> None:
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)
    def forward(self, x) -> torch.Tensor:
        return self.linear2(self.batchnorm(self.linear1(x)))
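# Usage sketch for the decorator under test (illustrative): the wrapped function
# receives the current batch size, and CUDA OOM errors trigger a retry at half
# that size until the call succeeds or the size reaches zero.
# @find_executable_batch_size(starting_batch_size=64)
# def train(batch_size):
#     ...  # attempt one training run at this batch size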
class MemoryTest(unittest.TestCase):
    '''simple docstring'''
    def test_memory_implicit(self) -> None:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self) -> None:
        batch_sizes = []
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1
        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self) -> None:
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self) -> None:
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self) -> None:
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self) -> None:
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")
        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self) -> None:
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory) | 90 | 0 |
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
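# Worked example (illustrative): for input_string="aab" and pattern="c*a*b",
# "c*" matches zero 'c' characters, "a*" matches "aa", and "b" matches "b",
# so match_pattern("aab", "c*a*b") evaluates to True.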
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
input_string = "aab"
pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 101 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ) -> Union[str, Any]:
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCAmelCase__ = []
# custom device map
if isinstance(A , A ) and len(device_map.keys() ) > 1:
lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase__ = get_keys_to_not_convert(A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A )
lowerCAmelCase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase__ = []
lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = get_parameter_device(A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A )
# convert param to the right dtype
lowerCAmelCase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCAmelCase__ = getattr(A , A , A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A ):
param.to(A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCAmelCase__ = replace_with_bnb_layers(
A , A , modules_to_not_convert=A )
lowerCAmelCase__ = get_quantized_model_device_map(
A , A , A , max_memory=A , no_split_module_classes=A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase__ = True
lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(A , device_map=A , offload_dir=A )
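# Usage sketch (mirrors accelerate's public `load_and_quantize_model` API; treat
# the exact config fields as an assumption):
# from accelerate.utils import BnbQuantizationConfig
# quant_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# model = load_and_quantize_model(model, quant_config, weights_location="/path/to/weights", device_map="auto")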
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase__ = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCAmelCase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase__ = {}
lowerCAmelCase__ = special_dtypes
lowerCAmelCase__ = no_split_module_classes
lowerCAmelCase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase__ = get_balanced_memory(
A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , )
lowerCAmelCase__ = max_memory
lowerCAmelCase__ = infer_auto_device_map(A , **A )
if isinstance(A , A ):
# check if don't have any quantized module on the cpu
lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
if modules_to_not_convert is None:
lowerCAmelCase__ = []
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
lowerCAmelCase__ = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase__ = []
current_key_name.append(A )
if isinstance(A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase__ = '''.'''.join(A )
lowerCAmelCase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowerCAmelCase__ = module.weight.data
if module.bias is not None:
lowerCAmelCase__ = module.bias.data
bnb_module.requires_grad_(A )
setattr(A , A , A )
lowerCAmelCase__ = True
if len(list(module.children() ) ) > 0:
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
lowerCAmelCase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def get_keys_to_not_convert( model ):
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase__ = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A , A ):
lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase__ = sum(A , [] )
lowerCAmelCase__ = len(A ) > 0
# Check if it is a base model
lowerCAmelCase__ = False
if hasattr(A , '''base_model_prefix''' ):
lowerCAmelCase__ = not hasattr(A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase__ = list(model.named_children() )
lowerCAmelCase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase__ = set(A ) - set(A )
lowerCAmelCase__ = list(set(A ) ) + list(A )
# remove ".weight" from the keys
lowerCAmelCase__ = ['''.weight''', '''.bias''']
lowerCAmelCase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase__ = name.replace(A , '''''' )
filtered_module_names.append(A )
return filtered_module_names
def has_4bit_bnb_layers( model ):
for m in model.modules():
if isinstance(A , bnb.nn.Linearabit ):
return True
return False
def get_parameter_device( parameter ):
return next(parameter.parameters() ).device
def _snake_case ( A , A , A , A , A , A , A ) -> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(A , A , 0 , dtype=A , value=A )
lowerCAmelCase__ = param_name
lowerCAmelCase__ = model
if "." in tensor_name:
lowerCAmelCase__ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCAmelCase__ = getattr(A , A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
lowerCAmelCase__ = new_module
lowerCAmelCase__ = splits[-1]
# offload weights
lowerCAmelCase__ = False
offload_weight(module._parameters[tensor_name] , A , A , index=A )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , )
else:
offload_weight(A , A , A , index=A )
offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A )
set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) ) | 90 | 0 |
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
    o = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
    q = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
    v = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_1 = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
    wo = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["""target"""] )
    old = {"""/""".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/layers_0/mlp/wi_0/kernel""" in old
    print("""Split MLP:""" , split_mlp_wi )
    new = collections.OrderedDict()
# Shared embeddings.
UpperCamelCase : Dict = old["""token_embedder/embedding"""]
# Encoder.
for i in range(SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
UpperCamelCase : int = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """encoder""" , """pre_attention_layer_norm""" )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """encoder""" , """attention""" )
UpperCamelCase : Optional[int] = layer_norm
UpperCamelCase : Tuple = k.T
UpperCamelCase : Any = o.T
UpperCamelCase : Tuple = q.T
UpperCamelCase : str = v.T
# Block i, layer 1 (MLP).
UpperCamelCase : Optional[int] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """encoder""" , """pre_mlp_layer_norm""" )
UpperCamelCase , UpperCamelCase : Tuple = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """encoder""" , SCREAMING_SNAKE_CASE )
UpperCamelCase : int = layer_norm
if split_mlp_wi:
UpperCamelCase : Optional[Any] = wi[0].T
UpperCamelCase : Optional[int] = wi[1].T
else:
UpperCamelCase : str = wi.T
UpperCamelCase : Optional[Any] = wo.T
UpperCamelCase : List[str] = old[
"""encoder/relpos_bias/rel_embedding"""
].T
UpperCamelCase : Optional[Any] = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE ):
# Block i, layer 0 (Self Attention).
UpperCamelCase : Union[str, Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """decoder""" , """pre_self_attention_layer_norm""" )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : Dict = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """decoder""" , """self_attention""" )
UpperCamelCase : Tuple = layer_norm
UpperCamelCase : int = k.T
UpperCamelCase : Any = o.T
UpperCamelCase : List[str] = q.T
UpperCamelCase : List[str] = v.T
# Block i, layer 1 (Cross Attention).
UpperCamelCase : List[Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """decoder""" , """pre_cross_attention_layer_norm""" )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase : str = tax_attention_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """decoder""" , """encoder_decoder_attention""" )
UpperCamelCase : Any = layer_norm
UpperCamelCase : List[Any] = k.T
UpperCamelCase : int = o.T
UpperCamelCase : str = q.T
UpperCamelCase : Tuple = v.T
# Block i, layer 2 (MLP).
UpperCamelCase : Union[str, Any] = tax_layer_norm_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """decoder""" , """pre_mlp_layer_norm""" )
UpperCamelCase , UpperCamelCase : Optional[int] = tax_mlp_lookup(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , """decoder""" , SCREAMING_SNAKE_CASE )
UpperCamelCase : List[str] = layer_norm
if split_mlp_wi:
UpperCamelCase : List[Any] = wi[0].T
UpperCamelCase : Dict = wi[1].T
else:
UpperCamelCase : Tuple = wi.T
UpperCamelCase : int = wo.T
UpperCamelCase : List[str] = old["""decoder/decoder_norm/scale"""]
UpperCamelCase : Union[str, Any] = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCamelCase : Dict = old["""decoder/logits_dense/kernel"""].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["""encoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["""decoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""" )
            state_dict["""lm_head.weight"""] = state_dict["""shared.weight"""]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    config = TaConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("""Done""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 102 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
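# Heun's method (the explicit trapezoidal rule) advances the solution with
#   y[k+1] = y[k] + (h/2) * (f(x_k, y_k) + f(x_k + h, y_k + h * f(x_k, y_k)))
# Example (illustrative): integrating y' = y with y(0) = 1 on [0, 1],
# euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)[-1] approaches e ~= 2.71828,
# and the global error shrinks as O(step_size**2).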
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 0 |
"""simple docstring"""
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
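if __name__ == "__main__":
    # Demo with illustrative data: columns are (price, comfort, year);
    # weight 0 means "lower is better", weight 1 means "higher is better".
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    print(procentual_proximity(vehicles, [0, 1, 1]))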
| 103 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict:
lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ )
lowerCAmelCase__ = length
lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Any:
return self.length
def __getitem__( self , lowerCamelCase_ ) -> List[str]:
return {"x": self.x[i], "y": self.y[i]}
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a + self.b
def _snake_case ( accelerator , batch_size = 16 ) -> Any:
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''' , data_files=data_files )
    label_list = datasets['''train'''].unique('''label''' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None , padding='''max_length''' )
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader | 90 | 0 |
"""simple docstring"""
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset( src_lang: str = "ro", tgt_lang: str = "en", dataset: str = "wmt16", save_dir: str = None ) -> None:
    """simple docstring"""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets" )
    pair = F"""{src_lang}-{tgt_lang}"""
    print(F"""Converting {dataset}-{pair}""" )
    ds = datasets.load_dataset(dataset, pair )
    if save_dir is None:
        save_dir = F"""{dataset}-{pair}"""
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(F"""Splitting {split} with {ds[split].num_rows} records""" )
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(F"""{fn}.source""" )
        tgt_path = save_dir.joinpath(F"""{fn}.target""" )
        src_fp = src_path.open("w+" )
        tgt_fp = tgt_path.open("w+" )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n" )
            tgt_fp.write(ex[tgt_lang] + "\n" )
    print(F"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
| 104 |
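A hypothetical programmatic call of the downloader above (the language pair and output directory are examples); it writes train/val/test `.source`/`.target` files under `save_dir`.

download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")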
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def _snake_case ( pkg , hint=None ) -> Optional[Any]:
    require_version(deps[pkg] , hint ) | 90 | 0 |
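As a sketch of what `require_version_core` does at import time, here is a minimal standalone analogue built on the standard library plus `packaging` (the helper name and the specifier-splitting logic are illustrative, not the transformers implementation):

from importlib.metadata import version
from packaging.specifiers import SpecifierSet

def check_dep(spec: str) -> None:
    # spec looks like "tqdm>=4.27": split the distribution name from the specifier
    name = spec
    for op in ("<=", ">=", "==", "!=", "~=", "<", ">"):
        if op in spec:
            name = spec.split(op)[0]
            break
    installed = version(name)
    constraint = spec[len(name):]
    if constraint and installed not in SpecifierSet(constraint):
        raise ImportError(f"{name}=={installed} does not satisfy '{spec}'")

check_dep("numpy>=1.17")  # raises ImportError if the installed numpy is too old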
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp( self ):
        super().setUp()
    def get_tokenizer( self , **kwargs ):
        return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' , **kwargs )
    def get_chinese_input_output_texts( self ):
        input_text = '永和服装饰品有限公司,今天天气非常好'
        output_text = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
        return input_text, output_text
    def test_tokenizer( self ):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
    def test_rust_tokenizer( self ):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , output_text.split() )
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , exp_tokens )
def snake_case ( self ):
pass
def snake_case ( self ):
pass
def snake_case ( self ):
pass
| 105 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A=False , A=False , A=False ) -> Union[str, Any]:
lowerCAmelCase__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def _snake_case ( A , A ) -> List[str]:
for i in range(config.num_hidden_layers ):
lowerCAmelCase__ = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
lowerCAmelCase__ = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ) -> None:
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ) -> None:
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def _snake_case ( A , A ) -> Any:
lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A )
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
if "vqa" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 3129
lowerCAmelCase__ = '''huggingface/label-files'''
lowerCAmelCase__ = '''vqa2-id2label.json'''
lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = ViltForQuestionAnswering(A )
elif "nlvr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 2
lowerCAmelCase__ = {0: '''False''', 1: '''True'''}
lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()}
lowerCAmelCase__ = 3
lowerCAmelCase__ = ViltForImagesAndTextClassification(A )
elif "irtr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForImageAndTextRetrieval(A )
elif "mlm_itm" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForMaskedLM(A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict''']
lowerCAmelCase__ = create_rename_keys(A , A , A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A )
if mlm_model or irtr_model:
lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCAmelCase__ , lowerCAmelCase__ = model.load_state_dict(A , strict=A )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(A )
# Define processor
lowerCAmelCase__ = ViltImageProcessor(size=384 )
lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ = ViltProcessor(A , A )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw )
if mlm_model:
lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowerCAmelCase__ = '''How many cats are there?'''
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(**A )
# Verify outputs
if mlm_model:
lowerCAmelCase__ = torch.Size([1, 11, 30522] )
lowerCAmelCase__ = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 )
# verify masked token prediction equals "cats"
lowerCAmelCase__ = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
lowerCAmelCase__ = torch.Size([1, 3129] )
lowerCAmelCase__ = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , A , atol=1E-4 )
# verify vqa prediction equals "2"
lowerCAmelCase__ = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
lowerCAmelCase__ = torch.Size([1, 2] )
lowerCAmelCase__ = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 0 |
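A hypothetical direct call of the converter, using the `convert_vilt_checkpoint` name from the `__main__` wiring above (it assumes the converter function is actually bound to that name; the output directory is an example):

convert_vilt_checkpoint(
    "https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
    "./vilt-mlm-itm-converted",
)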
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal( decimal: float ) -> str:
    '''simple docstring'''
    assert isinstance(decimal , (int, float) ) and decimal == int(decimal )
    decimal = int(decimal )
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal , 16 )
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod() | 106 |
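Usage sketch for the converter above, cross-checked against Python's built-in `hex()` (which it matches for nonzero integers):

for n in (5, 999, -256):
    print(decimal_to_hexadecimal(n), hex(n))  # pairs agree, e.g. 0x3e7 for 999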
'''simple docstring'''
import re
def indian_phone_validator( phone: str ) -> bool:
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 90 | 0 |
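A few illustrative inputs for the validator above (the numbers are made up):

for number in ("+91 9876543210", "09876543210", "1234567890"):
    print(number, indian_phone_validator(number))  # True, True, False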
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ):
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
args = parser.parse_args()
model = convert_fairseq_mbart_checkpoint_from_disk(
    args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 107 |
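Hypothetical programmatic use of the converter above (the checkpoint path and output directory are examples):

model = convert_fairseq_mbart_checkpoint_from_disk("mbart.cc25/model.pt", hf_config_path="facebook/mbart-large-cc25")
model.save_pretrained("./mbart-converted")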
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def _snake_case ( A ) -> Optional[Any]:
with open(A , '''r''' ) as f:
lowerCAmelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class a__ ( a__ ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ )
lowerCAmelCase__ = dict(enumerate(self.all_tokens ) )
lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowerCAmelCase__ = unk_token
lowerCAmelCase__ = cls_token
lowerCAmelCase__ = pad_token
lowerCAmelCase__ = mask_token
lowerCAmelCase__ = eos_token
lowerCAmelCase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
return text.split()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict:
return len(self._id_to_token )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {token: i for i, token in enumerate(self.all_tokens )}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase_ ) + [1]
return mask
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int:
return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ ) | 90 | 0 |
def solution( limit: int = 50000000 ) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2) )
    primes = set(range(3 , prime_square_limit + 1 , 2 ) )
    primes.add(2 )
    for p in range(3 , prime_square_limit + 1 , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , prime_square_limit + 1 , p ) ) )
    for primea in primes:
        square = primea * primea
        for primeb in primes:
            cube = primeb * primeb * primeb
            if square + cube >= limit - 16:
                break
            for primec in primes:
                tetr = primec * primec * primec * primec
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total )
    return len(ret )
if __name__ == "__main__":
print(F"{solution() = }") | 108 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 90 | 0 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class __a :
__UpperCamelCase : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__UpperCamelCase : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__UpperCamelCase : int = field(
default=128, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.task_name.lower()
class __a ( _snake_case ):
__UpperCamelCase : Union[str, Any] = 'train'
__UpperCamelCase : Union[str, Any] = 'dev'
__UpperCamelCase : List[str] = 'test'
class __a ( _snake_case ):
__UpperCamelCase : GlueDataTrainingArguments
__UpperCamelCase : str
__UpperCamelCase : List[InputFeatures]
def __init__( self : List[str] ,lowerCamelCase : GlueDataTrainingArguments ,lowerCamelCase : PreTrainedTokenizerBase ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Union[str, Split] = Split.train ,lowerCamelCase : Optional[str] = None ,):
'''simple docstring'''
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" ,lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = args
__SCREAMING_SNAKE_CASE = glue_processors[args.task_name]()
__SCREAMING_SNAKE_CASE = glue_output_modes[args.task_name]
if isinstance(lowerCamelCase ,lowerCamelCase ):
try:
__SCREAMING_SNAKE_CASE = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
__SCREAMING_SNAKE_CASE = os.path.join(
cache_dir if cache_dir is not None else args.data_dir ,f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" ,)
__SCREAMING_SNAKE_CASE = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = label_list[2], label_list[1]
__SCREAMING_SNAKE_CASE = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__SCREAMING_SNAKE_CASE = cached_features_file + """.lock"""
with FileLock(lowerCamelCase ):
if os.path.exists(lowerCamelCase ) and not args.overwrite_cache:
__SCREAMING_SNAKE_CASE = time.time()
__SCREAMING_SNAKE_CASE = torch.load(lowerCamelCase )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" ,time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
__SCREAMING_SNAKE_CASE = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
__SCREAMING_SNAKE_CASE = self.processor.get_test_examples(args.data_dir )
else:
__SCREAMING_SNAKE_CASE = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
__SCREAMING_SNAKE_CASE = examples[:limit_length]
__SCREAMING_SNAKE_CASE = glue_convert_examples_to_features(
lowerCamelCase ,lowerCamelCase ,max_length=args.max_seq_length ,label_list=lowerCamelCase ,output_mode=self.output_mode ,)
__SCREAMING_SNAKE_CASE = time.time()
torch.save(self.features ,lowerCamelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple ,lowerCamelCase : Dict ):
'''simple docstring'''
return self.features[i]
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return self.label_list
| 109 |
'''simple docstring'''
def greatest_common_divisor( x: int , y: int ) -> int:
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm( x: int , y: int ) -> int:
    return (x * y) // greatest_common_divisor(x , y )
def solution( n: int = 20 ) -> int:
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 15_36,
'junnyu/roformer_chinese_base': 15_36,
'junnyu/roformer_chinese_char_small': 5_12,
'junnyu/roformer_chinese_char_base': 5_12,
'junnyu/roformer_small_discriminator': 1_28,
'junnyu/roformer_small_generator': 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class a ( lowercase ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCAmelCase__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('lowercase' , UpperCamelCase_ ) != do_lower_case
or pre_tok_state.get('strip_accents' , UpperCamelCase_ ) != strip_accents
):
UpperCAmelCase__ : Any = getattr(UpperCamelCase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase__ : str = do_lower_case
UpperCAmelCase__ : Union[str, Any] = strip_accents
UpperCAmelCase__ : Dict = pre_tok_class(**UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = do_lower_case
def __getstate__( self ):
UpperCAmelCase__ : int = self.__dict__.copy()
UpperCAmelCase__ : int = BertPreTokenizer()
return state
def __setstate__( self , UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = d
UpperCAmelCase__ : List[str] = self.__dict__['_tokenizer'].get_vocab()
UpperCAmelCase__ : List[Any] = PreTokenizer.custom(JiebaPreTokenizer(UpperCamelCase_ ) )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=None ):
UpperCAmelCase__ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
UpperCAmelCase__ : int = [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
UpperCAmelCase__ : Any = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , **UpperCamelCase_ , ):
UpperCAmelCase__ : int = BertPreTokenizer()
return super().save_pretrained(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
| 110 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCAmelCase = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field( input_text , convert_value=None , default=None , error_message=None ) -> Union[str, Any]:
    ask_again = True
    while ask_again:
        result = input(input_text )
        try:
            if default is not None and len(result ) == 0:
                return default
            return convert_value(result ) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message )
def _ask_options( input_text , options=[] , convert_value=None , default_choice=0 ) -> List[Any]:
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _convert_compute_environment( value ) -> Tuple:
    value = int(value )
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _convert_distributed_mode( value ) -> Union[str, Any]:
    value = int(value )
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _convert_dynamo_backend( value ) -> str:
    value = int(value )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _convert_mixed_precision( value ) -> Tuple:
    value = int(value )
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _convert_sagemaker_distributed_mode( value ) -> Union[str, Any]:
    value = int(value )
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _convert_yes_no_to_bool( value ) -> bool:
    return {"yes": True, "no": False}[value.lower()]
class a__ ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
    def _format_usage( self , usage , actions , groups , prefix ) -> Dict:
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace('''<command> [<args>] ''' , '''''' )
        return usage | 90 | 0 |
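Quick illustrative checks of the converter helpers above (assuming they are importable; expected outputs shown as comments):

print(_convert_yes_no_to_bool("Yes"))      # True
print(_convert_yes_no_to_bool("no"))       # False
print(_convert_compute_environment("0"))   # ComputeEnvironment.LOCAL_MACHINE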
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68 |
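The `_LazyModule` indirection above defers the heavy submodule imports until an attribute is first touched. A stripped-down sketch of the idea (not the transformers implementation) looks like:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value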
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase__ = Encoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , )
lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ )
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
# pass init params to Decoder
lowerCAmelCase__ = Decoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput:
lowerCAmelCase__ = self.encoder(lowerCamelCase_ )
lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCamelCase_ )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ )
else:
lowerCAmelCase__ = h
lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ )
lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase__ = sample
lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents
lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ ) | 90 | 0 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion( value: float , from_type: str , to_type: str ) -> float:
    from_sanitized = from_type.lower().strip("s" )
    to_sanitized = to_type.lower().strip("s" )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F"Invalid 'from_type' value: {from_type!r}.\n"
            F"Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}"
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F"Invalid 'to_type' value: {to_type!r}.\n"
            F"Conversion abbreviations are: {', '.join(METRIC_CONVERSION )}"
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 440 |
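Usage sketch for the converter above (unit spellings may be full names or symbols):

print(length_conversion(4, "meter", "kilometer"))  # 0.004
print(length_conversion(1, "Gm", "m"))             # 1000000000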
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid: Matrix , row: int , column: int , n: int ) -> bool:
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location( grid: Matrix ) -> tuple[int, int] | None:
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku( grid: Matrix ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution( grid: Matrix ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
__UpperCAmelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 0 |
'''simple docstring'''
def hexagonal_numbers(length: int ) -> list[int]:
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('''Length must be a positive integer.''' )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 507 |
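Quick check of the generator above against the closed form h_n = n(2n - 1):

assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]
assert all(h == n * (2 * n - 1) for n, h in enumerate(hexagonal_numbers(100)))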
'''simple docstring'''
def fibonacci( n: int ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index( n: int ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution( n: int = 1000 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 | 0 |
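Small usage check for the row above: the first Fibonacci number with three digits is F(12) = 144, and the published Project Euler 25 answer for 1000 digits is index 4782.

print(solution(3))  # 12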
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def _A ( __snake_case :Optional[Any] ) -> Any:
"""simple docstring"""
def is_in_circle(__snake_case :Any , __snake_case :List[str] ) -> bool:
__SCREAMING_SNAKE_CASE = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
__SCREAMING_SNAKE_CASE = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(__snake_case ) )
# The ratio of the area for circle to square is pi/4.
__SCREAMING_SNAKE_CASE = proportion * 4
print(f'''The estimated value of pi is {pi_estimate}''' )
print(f'''The numpy value of pi is {pi}''' )
print(f'''The total error is {abs(pi - pi_estimate )}''' )
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
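# This is the mean-value Monte Carlo estimator:
#     integral_a^b f(x) dx  ~=  (b - a) * mean(f(U_i)),  U_i ~ Uniform(a, b)
# Its statistical error shrinks like O(1 / sqrt(iterations)).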
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator on y = x, whose integral has a closed form."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under a quarter circle of radius 2."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element (1-indexed) via quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
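# Illustrative call (element values assumed distinct, k is 1-indexed):
#     kth_number([2, 1, 3, 4, 5], 3) -> 3
# Note that duplicates of the pivot are dropped by the two comprehensions above,
# so this routine is only reliable on lists of distinct elements.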
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 0 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 27,
'''up''': 65 + ARROW_KEY_FLAG,
'''down''': 66 + ARROW_KEY_FLAG,
'''right''': 67 + ARROW_KEY_FLAG,
'''left''': 68 + ARROW_KEY_FLAG,
'''mod_int''': 91,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 50,
'''delete''': 51,
'''pg_up''': 53,
'''pg_down''': 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 119 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
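# The `_import_structure` mapping above is consumed by `_LazyModule` at the
# bottom of this file: submodules are imported only on first attribute access,
# which keeps the top-level `import transformers` cheap.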
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
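# e.g. floats_list((2, 3)) returns 2 nested lists of 3 floats drawn from
# [0, scale), using the module-level `global_rng` by default.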
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='''np''').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='''np''').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='''np''').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='''np''').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ['''longest''', '''max_length''', '''do_not_pad''']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors='''np''')
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ['''longest''', '''max_length''', '''do_not_pad''']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding='''max_length''', return_tensors='''np''')
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding='''longest''', return_tensors='''np''')
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding='''longest''', return_tensors='''np''')
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''np''')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'''input_values''': inputs}], return_tensors='''pt''')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors='''np''').input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='''np''').input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='''np''').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='''np''').input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='''np''').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='''np''')

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='''pt''')

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding='''longest''', return_tensors='''np''')[input_name]
        input_pt = feat_extract.pad(processed_features, padding='''longest''', return_tensors='''pt''')[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict['''return_attention_mask'''] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding='''longest''', return_tensors='''np''')
        self.assertIn('''attention_mask''', processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict['''return_attention_mask'''] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding='''max_length''', max_length=max_length, truncation=True, return_tensors='''np''')
        self.assertIn('''attention_mask''', processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''', '''clean''', split='''validation''')
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''').select(range(num_samples))[:num_samples]['''audio''']

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='''pt''').input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))
    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors='''pt''').input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 262 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('''KEY''')
__UpperCAmelCase = TypeVar('''VAL''')
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
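    # Open addressing with linear probing: on a collision we simply step to the
    # next bucket (modulo the table size) until a free slot or the key is found.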
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Store the pair at bucket ``ind``; return False if it holds another key."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(
            F"""{item.key}: {item.val}""" for item in self._buckets if item)
        return F"""HashMap({val_string})""" | 90 | 0 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset=None,
        eval_examples=None,
        ignore_keys=None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="""Evaluation""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f'''{metric_key_prefix}_'''):
                    metrics[f'''{metric_key_prefix}_{key}'''] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs) -> "PredictionOutput":
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="""Prediction""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, """predict""")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f'''{metric_key_prefix}_'''):
                metrics[f'''{metric_key_prefix}_{key}'''] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 673 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str) -> None:
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps, beta_schedule='''scaled_linear''', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=False, )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--checkpoint_path''', type=str, required=True)
    parser.add_argument('''--config_path''', type=str, required=True)
    parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 90 | 0 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """A body is in rotational static equilibrium when the net moment is (near) zero."""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
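# Each moment is the 2-D cross product r x F = r_x * F_y - r_y * F_x; summing
# them and comparing to ~0 (within eps) tests rotational equilibrium.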
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 455 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        }, )
    generation_num_beams: Optional[int] = field(
        default=None, metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        }, )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None, metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        }, )

    def to_dict(self) -> Dict:
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d | 90 | 0 |
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
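# The 6k +/- 1 shortcut works because every prime p > 3 satisfies p % 6 in {1, 5};
# e.g. is_prime(29) -> True (29 = 6*5 - 1), while is_prime(25) -> False (25 = 5*5).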
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"""{solution() = }""")
| 562 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='''first prompt''', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''', ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='''first prompt''', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''', ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = '''cyberpunk 2077'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy''', ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy''').images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='''numpy''').images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 | 90 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, """w""") as fp:
            fp.write("""\n""".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""")

        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_2 = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 480 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    base_str = str(n)
    return len(base_str) == 9 and set(base_str) == set('''123456789''')
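# e.g. is_9_pandigital(918273645) -> True (each of 1..9 appears exactly once),
# while is_9_pandigital(112345678) -> False (duplicate 1, missing 9).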
def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 0 |
"""simple docstring"""
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features", [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ], )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowercase ( UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : str ):
"""simple docstring"""
if issubclass(UpperCamelCase , UpperCamelCase ):
A__ : Any =text_path
elif issubclass(UpperCamelCase , UpperCamelCase ):
A__ : int =[text_path]
A__ : Dict =tmp_path / "cache"
A__ : Optional[int] ={"text": "string"}
A__ : Any =TextDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_text_dataset(UpperCamelCase , UpperCamelCase )
def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : int=("train",) ):
"""simple docstring"""
assert isinstance(UpperCamelCase , UpperCamelCase )
for split in splits:
A__ : Optional[Any] =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowercase ( UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any ):
"""simple docstring"""
A__ : Dict =tmp_path / "cache"
A__ : Optional[int] ={"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ : Union[str, Any] =TextDatasetReader({"train": text_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_text_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
    "features", [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ], )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 656 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
    with open(src_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, '''w''') as fp:
        fp.write('''\n'''.join(merges))

    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 90 | 0 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm for the greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    """Least common multiple via the gcd identity."""
    return (x * y) // greatest_common_divisor(x, y)
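# The identity lcm(x, y) * gcd(x, y) == x * y makes this exact for integers;
# e.g. lcm(4, 6) == (4 * 6) // 2 == 12.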
def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n (Project Euler 5)."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(F"""{solution() = }""")
| 68 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def _snake_case ( ) -> Union[str, Any]:
raise RuntimeError('''CUDA out of memory.''' )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function('''hello''')
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, '''hello'''])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''', cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, '''hello''', '''world''')
        self.assertIn('''Batch size was passed into `f`''', cm.exception.args[0])
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''', cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('''Oops, we had an error!''')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''', cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()

        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory) | 90 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__lowerCAmelCase : int =(((515, 22, 13), 555), ((61, 35, 49), 150))
__lowerCAmelCase : Dict =[2, 4, 1, 5]
__lowerCAmelCase : Union[str, Any] =len(train_data)
__lowerCAmelCase : Any =0.009
def UpperCamelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : int="train" ):
return calculate_hypothesis_value(_lowerCamelCase , _lowerCamelCase ) - output(
_lowerCamelCase , _lowerCamelCase )
def UpperCamelCase ( _lowerCamelCase : Optional[int] ):
A__ = 0
for i in range(len(_lowerCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : List[Any] ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def UpperCamelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=m ):
A__ = 0
for i in range(_lowerCamelCase ):
if index == -1:
summation_value += _error(_lowerCamelCase )
else:
summation_value += _error(_lowerCamelCase ) * train_data[i][0][index]
return summation_value
def UpperCamelCase ( _lowerCamelCase : Tuple ):
A__ = summation_of_cost_derivative(_lowerCamelCase , _lowerCamelCase ) / m
return cost_derivative_value
def UpperCamelCase ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
A__ = 0.0_0_0_0_0_2
A__ = 0
A__ = 0
while True:
j += 1
A__ = [0, 0, 0, 0]
for i in range(0 , len(_lowerCamelCase ) ):
A__ = get_cost_derivative(i - 1 )
A__ = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_lowerCamelCase , _lowerCamelCase , atol=_lowerCamelCase , rtol=_lowerCamelCase , ):
break
A__ = temp_parameter_vector
print(("Number of iterations:", j) )
def UpperCamelCase ( ):
for i in range(len(_lowerCamelCase ) ):
print(("Actual output value:", output(_lowerCamelCase , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(_lowerCamelCase , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 440 |
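The snippet above updates each parameter with an explicit per-coordinate loop. A vectorized sketch of the same batch gradient descent for a linear hypothesis, assuming NumPy and illustrative names:

```python
import numpy as np

def fit_linear(x, y, lr=0.009, atol=2e-6, max_iter=100_000):
    """Batch gradient descent on mean squared error; theta[0] is the bias."""
    x = np.hstack([np.ones((x.shape[0], 1)), x])  # prepend a bias column of ones
    theta = np.zeros(x.shape[1])
    for _ in range(max_iter):
        error = x @ theta - y           # residuals, shape (m,)
        grad = x.T @ error / len(y)     # mean gradient over the training set
        new_theta = theta - lr * grad
        if np.allclose(theta, new_theta, atol=atol, rtol=0):
            break
        theta = new_theta
    return theta

train_x = np.array([(5, 2, 3), (6, 5, 9), (11, 12, 13), (1, 1, 1)], dtype=float)
train_y = np.array([15, 25, 41, 8], dtype=float)
print(fit_linear(train_x, train_y))
```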
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]:
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
lowerCAmelCase__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCAmelCase__ = []
# custom device map
if isinstance(A , A ) and len(device_map.keys() ) > 1:
lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase__ = get_keys_to_not_convert(A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A )
lowerCAmelCase__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase__ = []
lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = load_in_abit
lowerCAmelCase__ = get_parameter_device(A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A )
# convert param to the right dtype
lowerCAmelCase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCAmelCase__ = getattr(A , A , A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A ):
param.to(A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCAmelCase__ = replace_with_bnb_layers(
A , A , modules_to_not_convert=A )
lowerCAmelCase__ = get_quantized_model_device_map(
A , A , A , max_memory=A , no_split_module_classes=A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase__ = True
lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(A , device_map=A , offload_dir=A )
def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]:
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase__ = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCAmelCase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase__ = {}
lowerCAmelCase__ = special_dtypes
lowerCAmelCase__ = no_split_module_classes
lowerCAmelCase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase__ = get_balanced_memory(
A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , )
lowerCAmelCase__ = max_memory
lowerCAmelCase__ = infer_auto_device_map(A , **A )
if isinstance(A , A ):
# check if don't have any quantized module on the cpu
lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def _snake_case ( A , A , A=None , A=None ) -> Any:
if modules_to_not_convert is None:
lowerCAmelCase__ = []
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]:
lowerCAmelCase__ = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase__ = []
current_key_name.append(A )
if isinstance(A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase__ = '''.'''.join(A )
lowerCAmelCase__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase__ = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowerCAmelCase__ = module.weight.data
if module.bias is not None:
lowerCAmelCase__ = module.bias.data
bnb_module.requires_grad_(A )
setattr(A , A , A )
lowerCAmelCase__ = True
if len(list(module.children() ) ) > 0:
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
lowerCAmelCase__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _snake_case ( A ) -> Tuple:
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase__ = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A , A ):
lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase__ = sum(A , [] )
lowerCAmelCase__ = len(A ) > 0
# Check if it is a base model
lowerCAmelCase__ = False
if hasattr(A , '''base_model_prefix''' ):
lowerCAmelCase__ = not hasattr(A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase__ = list(model.named_children() )
lowerCAmelCase__ = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase__ = set(A ) - set(A )
lowerCAmelCase__ = list(set(A ) ) + list(A )
# remove ".weight" from the keys
lowerCAmelCase__ = ['''.weight''', '''.bias''']
lowerCAmelCase__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase__ = name.replace(A , '''''' )
filtered_module_names.append(A )
return filtered_module_names
def _snake_case ( A ) -> Optional[int]:
for m in model.modules():
if isinstance(A , bnb.nn.Linearabit ):
return True
return False
def _snake_case ( A ) -> Union[str, Any]:
return next(parameter.parameters() ).device
def _snake_case ( A , A , A , A , A , A , A ) -> Any:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(A , A , 0 , dtype=A , value=A )
lowerCAmelCase__ = param_name
lowerCAmelCase__ = model
if "." in tensor_name:
lowerCAmelCase__ = tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCAmelCase__ = getattr(A , A )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
lowerCAmelCase__ = new_module
lowerCAmelCase__ = splits[-1]
# offload weights
lowerCAmelCase__ = False
offload_weight(module._parameters[tensor_name] , A , A , index=A )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , )
else:
offload_weight(A , A , A , index=A )
offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A )
set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) ) | 90 | 0 |
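The heart of `_replace_with_bnb_layers` above is a recursive walk over `named_children` that swaps `nn.Linear` modules in place. A dependency-free sketch of that swap pattern (the replacement here is a plain `nn.Linear` standing in for a bitsandbytes layer; names are illustrative):

```python
import torch.nn as nn

def replace_linears(model, skip=frozenset(), prefix=""):
    """Recursively replace nn.Linear children, skipping fully-qualified names in `skip`."""
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            new_layer = nn.Linear(child.in_features, child.out_features, child.bias is not None)
            new_layer.weight.data = child.weight.data  # carry over the original parameters
            if child.bias is not None:
                new_layer.bias.data = child.bias.data
            setattr(model, name, new_layer)  # in-place swap on the parent module
        else:
            replace_linears(child, skip, full_name)
    return model
```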
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mctct"""] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 507 |
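The `_LazyModule` machinery above defers the heavy `modeling_mctct` import until an attribute is first touched. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); `heavy_submodule` and `HeavyClass` are hypothetical names:

```python
# package/__init__.py -- attributes resolve on first access, not at import time.
import importlib

_LAZY_ATTRS = {"HeavyClass": ".heavy_submodule"}  # attribute name -> relative module

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```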
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        y_get = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_get ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 0 |
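A quick sanity check of the integrator above on dy/dx = y with y(0) = 1, whose exact solution is e^x; this usage sketch assumes `euler_modified` as restored above is in scope:

```python
import numpy as np

y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1], np.e)  # Heun's method is second order, so y[-1] should sit very close to e
```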
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_snake_case : Optional[int] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments ):
    sortish_sampler: bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate: bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )
    def to_dict(self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig ):
                d[k] = v.to_dict()
        return d
| 693 |
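A usage sketch for the arguments class above, assuming a working transformers install (in older versions `output_dir` is the one required base field). The nested `GenerationConfig` is flattened to a plain dict by the overridden `to_dict`:

```python
from transformers import GenerationConfig, Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,
    generation_max_length=64,
    generation_config=GenerationConfig(num_beams=4),
)
assert isinstance(args.to_dict()["generation_config"], dict)  # serialized, not a GenerationConfig
```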
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    '''simple docstring'''
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
    def __len__( self ):
        return self.length
    def __getitem__( self , i ):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module ):
    '''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module ):
    '''simple docstring'''
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True
    def forward( self , x=None ):
        if self.first_batch:
            print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator , batch_size=16 ):
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''' , data_files=data_files )
    label_list = datasets['''train'''].unique('''label''' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None , padding='''max_length''' )
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader | 90 | 0 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def __UpperCamelCase ( lowercase__ : Optional[int], lowercase__ : List[str], lowercase__ : Union[str, Any] = None, lowercase__ : List[Any] = None, lowercase__ : Any = None, lowercase__ : List[Any] = None, lowercase__ : Optional[Any] = None, lowercase__ : Optional[Any] = False, ):
'''simple docstring'''
__lowercase =bnb_quantization_config.load_in_abit
__lowercase =bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
'make sure you have the latest version of `bitsandbytes` installed.' )
__lowercase =[]
# custom device map
if isinstance(lowercase__, lowercase__ ) and len(device_map.keys() ) > 1:
__lowercase =[key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__lowercase =get_keys_to_not_convert(lowercase__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowercase__ )
__lowercase =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__lowercase =[]
__lowercase =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowercase__ )
# compatibility with peft
__lowercase =load_in_abit
__lowercase =load_in_abit
__lowercase =get_parameter_device(lowercase__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
__lowercase =replace_with_bnb_layers(lowercase__, lowercase__, modules_to_not_convert=lowercase__ )
# convert param to the right dtype
__lowercase =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
__lowercase =name.replace('.weight', '' ).replace('.bias', '' )
__lowercase =getattr(lowercase__, lowercase__, lowercase__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowercase__ ):
param.to(lowercase__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
__lowercase =replace_with_bnb_layers(
lowercase__, lowercase__, modules_to_not_convert=lowercase__ )
__lowercase =get_quantized_model_device_map(
lowercase__, lowercase__, lowercase__, max_memory=lowercase__, no_split_module_classes=lowercase__, )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__lowercase =True
__lowercase =any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
lowercase__, lowercase__, lowercase__, dtype=bnb_quantization_config.torch_dtype, offload_folder=lowercase__, offload_state_dict=lowercase__, keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules, offload_abit_bnb=load_in_abit and offload, )
return dispatch_model(lowercase__, device_map=lowercase__, offload_dir=lowercase__ )
def __UpperCamelCase ( lowercase__ : List[Any], lowercase__ : Optional[Any], lowercase__ : int=None, lowercase__ : List[Any]=None, lowercase__ : str=None ):
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
__lowercase ={'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(lowercase__, lowercase__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
__lowercase ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
__lowercase ={}
__lowercase =special_dtypes
__lowercase =no_split_module_classes
__lowercase =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
__lowercase =get_balanced_memory(
lowercase__, low_zero=(device_map == 'balanced_low_0'), max_memory=lowercase__, **lowercase__, )
__lowercase =max_memory
__lowercase =infer_auto_device_map(lowercase__, **lowercase__ )
if isinstance(lowercase__, lowercase__ ):
# check if don't have any quantized module on the cpu
__lowercase =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
__lowercase ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def __UpperCamelCase ( lowercase__ : int, lowercase__ : int, lowercase__ : Tuple=None, lowercase__ : List[Any]=None ):
'''simple docstring'''
if modules_to_not_convert is None:
__lowercase =[]
__lowercase , __lowercase =_replace_with_bnb_layers(
lowercase__, lowercase__, lowercase__, lowercase__ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def __UpperCamelCase ( lowercase__ : Optional[int], lowercase__ : Dict, lowercase__ : Any=None, lowercase__ : Tuple=None, ):
'''simple docstring'''
__lowercase =False
for name, module in model.named_children():
if current_key_name is None:
__lowercase =[]
current_key_name.append(lowercase__ )
if isinstance(lowercase__, nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
__lowercase ='.'.join(lowercase__ )
__lowercase =True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
__lowercase =False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
__lowercase =bnb.nn.LinearabitLt(
module.in_features, module.out_features, module.bias is not None, has_fpaa_weights=lowercase__, threshold=bnb_quantization_config.llm_inta_threshold, )
elif bnb_quantization_config.load_in_abit:
__lowercase =bnb.nn.Linearabit(
module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_abit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant, quant_type=bnb_quantization_config.bnb_abit_quant_type, )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
__lowercase =module.weight.data
if module.bias is not None:
__lowercase =module.bias.data
bnb_module.requires_grad_(lowercase__ )
setattr(lowercase__, lowercase__, lowercase__ )
__lowercase =True
if len(list(module.children() ) ) > 0:
__lowercase , __lowercase =_replace_with_bnb_layers(
lowercase__, lowercase__, lowercase__, lowercase__ )
__lowercase =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCamelCase ( lowercase__ : List[Any] ):
'''simple docstring'''
with init_empty_weights():
__lowercase =deepcopy(lowercase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
__lowercase =find_tied_parameters(lowercase__ )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase__, lowercase__ ):
__lowercase =sum(list(tied_params.values() ), [] ) + list(tied_params.keys() )
else:
__lowercase =sum(lowercase__, [] )
__lowercase =len(lowercase__ ) > 0
# Check if it is a base model
__lowercase =False
if hasattr(lowercase__, 'base_model_prefix' ):
__lowercase =not hasattr(lowercase__, model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__lowercase =list(model.named_children() )
__lowercase =[list_modules[-1][0]]
# add last module together with tied weights
__lowercase =set(lowercase__ ) - set(lowercase__ )
__lowercase =list(set(lowercase__ ) ) + list(lowercase__ )
# remove ".weight" from the keys
__lowercase =['.weight', '.bias']
__lowercase =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__lowercase =name.replace(lowercase__, '' )
filtered_module_names.append(lowercase__ )
return filtered_module_names
def __UpperCamelCase ( lowercase__ : Tuple ):
'''simple docstring'''
for m in model.modules():
if isinstance(lowercase__, bnb.nn.Linearabit ):
return True
return False
def __UpperCamelCase ( lowercase__ : str ):
'''simple docstring'''
return next(parameter.parameters() ).device
def __UpperCamelCase ( lowercase__ : List[str], lowercase__ : Any, lowercase__ : Tuple, lowercase__ : Optional[int], lowercase__ : Optional[Any], lowercase__ : Tuple, lowercase__ : List[Any] ):
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(lowercase__, lowercase__, 0, dtype=lowercase__, value=lowercase__ )
__lowercase =param_name
__lowercase =model
if "." in tensor_name:
__lowercase =tensor_name.split('.' )
for split in splits[:-1]:
__lowercase =getattr(lowercase__, lowercase__ )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
__lowercase =new_module
__lowercase =splits[-1]
# offload weights
__lowercase =False
offload_weight(module._parameters[tensor_name], lowercase__, lowercase__, index=lowercase__ )
if hasattr(module._parameters[tensor_name], 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB, param_name.replace('weight', 'SCB' ), lowercase__, index=lowercase__, )
else:
offload_weight(lowercase__, lowercase__, lowercase__, index=lowercase__ )
offload_weight(lowercase__, param_name.replace('weight', 'SCB' ), lowercase__, index=lowercase__ )
set_module_tensor_to_device(lowercase__, lowercase__, 'meta', dtype=lowercase__, value=torch.empty(*param.size() ) )
| 119 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg , hint=None ):
    require_version(deps[pkg] , hint ) | 90 | 0 |
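A minimal sketch of the kind of runtime check `require_version_core` performs, built only on `importlib.metadata` and `packaging` (the error wording is illustrative):

```python
from importlib.metadata import PackageNotFoundError, version

from packaging.version import parse

def require_min_version(pkg, minimum):
    """Raise ImportError if `pkg` is missing or older than `minimum`."""
    try:
        installed = version(pkg)
    except PackageNotFoundError as e:
        raise ImportError(f"{pkg} is required but not installed.") from e
    if parse(installed) < parse(minimum):
        raise ImportError(f"{pkg}>={minimum} is required, found {installed}.")
```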
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCAmelCase : Optional[int] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape , vocab_size , rng=None ):
    """simple docstring"""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def random_attention_mask(shape , rng=None ):
    """simple docstring"""
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class A :
    model_tester = None
    all_generative_model_classes = ()
def snake_case__ ( self : List[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
__UpperCAmelCase = 2
__UpperCAmelCase = inputs['''input_ids'''].shape[-1] // 2
__UpperCAmelCase = inputs['''input_ids'''][:max_batch_size, :sequence_length]
__UpperCAmelCase = jnp.ones_like(lowerCamelCase_ )
__UpperCAmelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
__UpperCAmelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
__UpperCAmelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def snake_case__ ( self : List[str] ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
__UpperCAmelCase = False
__UpperCAmelCase = max_length
__UpperCAmelCase = 0
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCAmelCase = getattr(lowerCamelCase_ , lowerCamelCase_ )
__UpperCAmelCase = pt_model_class(lowerCamelCase_ ).eval()
__UpperCAmelCase = load_flax_weights_in_pytorch_model(lowerCamelCase_ , flax_model.params )
__UpperCAmelCase = flax_model.generate(lowerCamelCase_ ).sequences
__UpperCAmelCase = pt_model.generate(torch.tensor(lowerCamelCase_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
__UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
__UpperCAmelCase = False
__UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
__UpperCAmelCase = jit(model.generate )
__UpperCAmelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
__UpperCAmelCase = True
__UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
__UpperCAmelCase = jit(model.generate )
__UpperCAmelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
__UpperCAmelCase = False
__UpperCAmelCase = max_length
__UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
__UpperCAmelCase = jit(model.generate )
__UpperCAmelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self : Dict ) -> List[str]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
__UpperCAmelCase = False
__UpperCAmelCase = max_length
__UpperCAmelCase = 2
__UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def snake_case__ ( self : Union[str, Any] ) -> int:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
__UpperCAmelCase = True
__UpperCAmelCase = max_length
__UpperCAmelCase = 0.8
__UpperCAmelCase = 1_0
__UpperCAmelCase = 0.3
__UpperCAmelCase = 1
__UpperCAmelCase = 8
__UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
__UpperCAmelCase = jit(model.generate )
__UpperCAmelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self : List[str] ) -> str:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
__UpperCAmelCase = max_length
__UpperCAmelCase = 1
__UpperCAmelCase = 8
__UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
__UpperCAmelCase = jit(model.generate )
__UpperCAmelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
__UpperCAmelCase = max_length
__UpperCAmelCase = 2
__UpperCAmelCase = 1
__UpperCAmelCase = 8
__UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
__UpperCAmelCase = jit(model.generate )
__UpperCAmelCase = jit_generate(lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self : str ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase = False
__UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
__UpperCAmelCase = jit(model.generate )
__UpperCAmelCase = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase = True
__UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
__UpperCAmelCase = jit(model.generate )
__UpperCAmelCase = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
__UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
__UpperCAmelCase = 2
__UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
__UpperCAmelCase = model_class(lowerCamelCase_ )
__UpperCAmelCase = model.generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCamelCase_ )
__UpperCAmelCase = jit(model.generate )
__UpperCAmelCase = jit_generate(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class A ( unittest.TestCase ):
def snake_case__ ( self : str ) -> Optional[int]:
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
__UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
__UpperCAmelCase = '''Hello world'''
__UpperCAmelCase = tokenizer(lowerCamelCase_ , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCamelCase_ , '''do_samples''' ):
model.generate(lowerCamelCase_ , do_samples=lowerCamelCase_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCamelCase_ , '''foo''' ):
__UpperCAmelCase = {'''foo''': '''bar'''}
model.generate(lowerCamelCase_ , **lowerCamelCase_ )
| 262 |
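A recurring pattern in the tests above is compiling a function with `jax.jit` and asserting it matches the eager result; the same check in isolation, with a toy function standing in for `model.generate`:

```python
import jax
import jax.numpy as jnp

def step(x):
    return (x * 2 + 1).sum()

jit_step = jax.jit(step)          # traced and compiled on first call
x = jnp.arange(8.0)
assert jnp.allclose(step(x), jit_step(x))  # compiled and eager outputs agree
```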
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict , config ):
    for i in range(config.num_hidden_layers ):
        prefix = '''vilt.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url , pytorch_dump_folder_path ):
lowerCAmelCase__ = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=A )
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
if "vqa" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 3129
lowerCAmelCase__ = '''huggingface/label-files'''
lowerCAmelCase__ = '''vqa2-id2label.json'''
lowerCAmelCase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase__ = {int(A ): v for k, v in idalabel.items()}
lowerCAmelCase__ = idalabel
lowerCAmelCase__ = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ = ViltForQuestionAnswering(A )
elif "nlvr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = 2
lowerCAmelCase__ = {0: '''False''', 1: '''True'''}
lowerCAmelCase__ = {v: k for k, v in config.idalabel.items()}
lowerCAmelCase__ = 3
lowerCAmelCase__ = ViltForImagesAndTextClassification(A )
elif "irtr" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForImageAndTextRetrieval(A )
elif "mlm_itm" in checkpoint_url:
lowerCAmelCase__ = True
lowerCAmelCase__ = ViltForMaskedLM(A )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict''']
lowerCAmelCase__ = create_rename_keys(A , A , A , A )
for src, dest in rename_keys:
rename_key(A , A , A )
read_in_q_k_v(A , A )
if mlm_model or irtr_model:
lowerCAmelCase__ = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
lowerCAmelCase__ , lowerCAmelCase__ = model.load_state_dict(A , strict=A )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(A )
# Define processor
lowerCAmelCase__ = ViltImageProcessor(size=384 )
lowerCAmelCase__ = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCAmelCase__ = ViltProcessor(A , A )
# Forward pass on example inputs (image + text)
if nlvr_model:
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=A ).raw )
lowerCAmelCase__ = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
lowerCAmelCase__ = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=A ).raw )
if mlm_model:
lowerCAmelCase__ = '''a bunch of [MASK] laying on a [MASK].'''
else:
lowerCAmelCase__ = '''How many cats are there?'''
lowerCAmelCase__ = processor(A , A , return_tensors='''pt''' )
lowerCAmelCase__ = model(**A )
# Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1E-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 )
        assert outputs.logits.shape == expected_shape
Path(A ).mkdir(exist_ok=A )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
processor.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCAmelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 0 |
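The conversion above is driven by a list of `(old, new)` key pairs popped and reinserted into the checkpoint's state dict; a generic sketch of that helper with a toy state dict:

```python
import torch

def rename_state_dict_keys(state_dict, rename_pairs):
    """Move each tensor from its old key to the new one; a missing old key fails loudly."""
    for old, new in rename_pairs:
        state_dict[new] = state_dict.pop(old)
    return state_dict

sd = {"transformer.norm.weight": torch.ones(3)}
rename_state_dict_keys(sd, [("transformer.norm.weight", "vilt.layernorm.weight")])
assert "vilt.layernorm.weight" in sd
```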
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class UpperCamelCase_ :
def __init__( self , snake_case__=2 , snake_case__=3 , snake_case__=64 , snake_case__=None ) -> Dict:
"""simple docstring"""
UpperCAmelCase = np.random.default_rng(lowerCamelCase_ )
UpperCAmelCase = length
UpperCAmelCase = rng.normal(size=(length,) ).astype(np.floataa )
UpperCAmelCase = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Any:
"""simple docstring"""
return self.length
def __getitem__( self , snake_case__ ) -> List[str]:
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class UpperCamelCase_ ( torch.nn.Module ):
def __init__( self , snake_case__=0 , snake_case__=0 , snake_case__=False ) -> List[Any]:
"""simple docstring"""
super().__init__()
UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCAmelCase = True
def UpperCamelCase_ ( self , snake_case__=None ) -> Optional[Any]:
"""simple docstring"""
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCAmelCase = False
return x * self.a[0] + self.b[0]
class UpperCamelCase_ ( torch.nn.Module ):
def __init__( self , snake_case__=0 , snake_case__=0 , snake_case__=False ) -> Any:
"""simple docstring"""
super().__init__()
UpperCAmelCase = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
UpperCAmelCase = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
UpperCAmelCase = True
def UpperCamelCase_ ( self , snake_case__=None ) -> Any:
"""simple docstring"""
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
UpperCAmelCase = False
return x * self.a + self.b
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase = 16 ):
'''simple docstring'''
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCAmelCase = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
UpperCAmelCase = load_dataset("""csv""" , data_files=lowerCAmelCase )
UpperCAmelCase = datasets["""train"""].unique("""label""" )
UpperCAmelCase = {v: i for i, v in enumerate(lowerCAmelCase )}
def tokenize_function(lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase , max_length=lowerCAmelCase , padding="""max_length""" )
if "label" in examples:
UpperCAmelCase = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCAmelCase = datasets.map(
lowerCAmelCase , batched=lowerCAmelCase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowerCAmelCase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(tokenized_datasets["""train"""] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=2 )
UpperCAmelCase = DataLoader(tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader
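# Hypothetical usage sketch (added comment; the function and argument names below are
# assumptions, not definitions from this file): the two loaders returned above would
# normally be wrapped by the Accelerator before the training loop, e.g.
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
#   model, optimizer, train_dl, eval_dl = accelerator.prepare(model, optimizer, train_dl, eval_dl)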
| 673 |
'''simple docstring'''
import re
def _snake_case ( A ) -> bool:
lowerCAmelCase__ = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(A , A ):
return match.string == phone
return False
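# A few illustrative cases for the pattern above (added comment):
#   indian_phone_validator("+918827897895")  -> True   ("+91" prefix, then ten digits starting with 8)
#   indian_phone_validator("9876543210")     -> True   (bare ten-digit number starting with 7, 8 or 9)
#   indian_phone_validator("1234567890")     -> False  (the first digit must be 7, 8 or 9)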
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 90 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowerCamelCase__ = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowerCamelCase__ = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def UpperCamelCase ( snake_case__ : Optional[int] ):
'''simple docstring'''
__snake_case :int = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" )
return numpy.frombuffer(bytestream.read(4 ) ,dtype=snake_case__ )[0]
@deprecated(snake_case__ ,"""Please use tf.data to implement this functionality.""" )
def UpperCamelCase ( snake_case__ : str ):
'''simple docstring'''
print("""Extracting""" ,f.name )
with gzip.GzipFile(fileobj=snake_case__ ) as bytestream:
__snake_case :str = _readaa(snake_case__ )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
__snake_case :List[str] = _readaa(snake_case__ )
__snake_case :Any = _readaa(snake_case__ )
__snake_case :str = _readaa(snake_case__ )
__snake_case :str = bytestream.read(rows * cols * num_images )
__snake_case :List[Any] = numpy.frombuffer(snake_case__ ,dtype=numpy.uinta )
__snake_case :Optional[int] = data.reshape(snake_case__ ,snake_case__ ,snake_case__ ,1 )
return data
@deprecated(snake_case__ ,"""Please use tf.one_hot on tensors.""" )
def UpperCamelCase ( snake_case__ : List[Any] ,snake_case__ : Optional[Any] ):
'''simple docstring'''
__snake_case :Optional[Any] = labels_dense.shape[0]
__snake_case :Dict = numpy.arange(snake_case__ ) * num_classes
__snake_case :Union[str, Any] = numpy.zeros((num_labels, num_classes) )
__snake_case :List[Any] = 1
return labels_one_hot
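# Worked example for the conversion above (added comment): with labels_dense = [0, 2] and
# num_classes = 3, the original implementation sets
#   labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
# which yields [[1, 0, 0], [0, 0, 1]].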
@deprecated(snake_case__ ,"""Please use tf.data to implement this functionality.""" )
def UpperCamelCase ( snake_case__ : int ,snake_case__ : Union[str, Any]=False ,snake_case__ : str=10 ):
'''simple docstring'''
print("""Extracting""" ,f.name )
with gzip.GzipFile(fileobj=snake_case__ ) as bytestream:
__snake_case :List[Any] = _readaa(snake_case__ )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
__snake_case :int = _readaa(snake_case__ )
__snake_case :Dict = bytestream.read(snake_case__ )
__snake_case :Any = numpy.frombuffer(snake_case__ ,dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(snake_case__ ,snake_case__ )
return labels
class snake_case__ :
'''simple docstring'''
@deprecated(
lowerCamelCase_ , """Please use alternatives such as official/mnist/_DataSet.py"""
""" from tensorflow/models.""" , )
def __init__( self , a__ , a__ , a__=False , a__=False , a__=dtypes.floataa , a__=True , a__=None , ) -> Optional[Any]:
'''simple docstring'''
__snake_case , __snake_case :Tuple = random_seed.get_seed(lowerCamelCase_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__snake_case :List[Any] = dtypes.as_dtype(lowerCamelCase_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
if fake_data:
__snake_case :Any = 1_00_00
__snake_case :Tuple = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'''images.shape: {images.shape} labels.shape: {labels.shape}'''
__snake_case :Optional[Any] = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__snake_case :Optional[Any] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__snake_case :Optional[Any] = images.astype(numpy.floataa )
__snake_case :Optional[int] = numpy.multiply(lowerCamelCase_ , 1.0 / 2_55.0 )
__snake_case :Optional[int] = images
__snake_case :Tuple = labels
__snake_case :Tuple = 0
__snake_case :Tuple = 0
@property
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return self._images
@property
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
return self._labels
@property
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return self._num_examples
@property
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
return self._epochs_completed
def __lowercase ( self , a__ , a__=False , a__=True ) -> Optional[Any]:
'''simple docstring'''
if fake_data:
__snake_case :str = [1] * 7_84
__snake_case :int = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(lowerCamelCase_ )],
[fake_label for _ in range(lowerCamelCase_ )],
)
__snake_case :Tuple = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__snake_case :Tuple = numpy.arange(self._num_examples )
numpy.random.shuffle(lowerCamelCase_ )
__snake_case :List[Any] = self.images[perma]
__snake_case :Union[str, Any] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__snake_case :Optional[int] = self._num_examples - start
__snake_case :Tuple = self._images[start : self._num_examples]
__snake_case :int = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__snake_case :Union[str, Any] = numpy.arange(self._num_examples )
numpy.random.shuffle(lowerCamelCase_ )
__snake_case :Any = self.images[perm]
__snake_case :int = self.labels[perm]
# Start next epoch
__snake_case :Union[str, Any] = 0
__snake_case :Optional[int] = batch_size - rest_num_examples
__snake_case :Tuple = self._index_in_epoch
__snake_case :Union[str, Any] = self._images[start:end]
__snake_case :List[Any] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
__snake_case :Union[str, Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(snake_case__ ,"""Please write your own downloading logic.""" )
def UpperCamelCase ( snake_case__ : List[Any] ,snake_case__ : int ,snake_case__ : List[Any] ):
'''simple docstring'''
if not gfile.Exists(snake_case__ ):
gfile.MakeDirs(snake_case__ )
__snake_case :Optional[Any] = os.path.join(snake_case__ ,snake_case__ )
if not gfile.Exists(snake_case__ ):
urllib.request.urlretrieve(snake_case__ ,snake_case__ ) # noqa: S310
with gfile.GFile(snake_case__ ) as f:
__snake_case :Optional[Any] = f.size()
print("""Successfully downloaded""" ,snake_case__ ,snake_case__ ,"""bytes.""" )
return filepath
@deprecated(
snake_case__ ,"""Please use alternatives such as:""" """ tensorflow_datasets.load(\'mnist\')""" )
def UpperCamelCase ( snake_case__ : int ,snake_case__ : Tuple=False ,snake_case__ : Any=False ,snake_case__ : Dict=dtypes.floataa ,snake_case__ : Any=True ,snake_case__ : List[Any]=5000 ,snake_case__ : str=None ,snake_case__ : Tuple=DEFAULT_SOURCE_URL ,):
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] ,[] ,fake_data=snake_case__ ,one_hot=snake_case__ ,dtype=snake_case__ ,seed=snake_case__ )
__snake_case :int = fake()
__snake_case :Tuple = fake()
__snake_case :str = fake()
return _Datasets(train=snake_case__ ,validation=snake_case__ ,test=snake_case__ )
if not source_url: # empty string check
__snake_case :str = DEFAULT_SOURCE_URL
__snake_case :str = """train-images-idx3-ubyte.gz"""
__snake_case :Optional[int] = """train-labels-idx1-ubyte.gz"""
__snake_case :Optional[int] = """t10k-images-idx3-ubyte.gz"""
__snake_case :int = """t10k-labels-idx1-ubyte.gz"""
__snake_case :Union[str, Any] = _maybe_download(
snake_case__ ,snake_case__ ,source_url + train_images_file )
with gfile.Open(snake_case__ ,"""rb""" ) as f:
__snake_case :str = _extract_images(snake_case__ )
__snake_case :List[Any] = _maybe_download(
snake_case__ ,snake_case__ ,source_url + train_labels_file )
with gfile.Open(snake_case__ ,"""rb""" ) as f:
__snake_case :Union[str, Any] = _extract_labels(snake_case__ ,one_hot=snake_case__ )
__snake_case :List[Any] = _maybe_download(
snake_case__ ,snake_case__ ,source_url + test_images_file )
with gfile.Open(snake_case__ ,"""rb""" ) as f:
__snake_case :Tuple = _extract_images(snake_case__ )
__snake_case :Optional[int] = _maybe_download(
snake_case__ ,snake_case__ ,source_url + test_labels_file )
with gfile.Open(snake_case__ ,"""rb""" ) as f:
__snake_case :Union[str, Any] = _extract_labels(snake_case__ ,one_hot=snake_case__ )
if not 0 <= validation_size <= len(snake_case__ ):
__snake_case :str = (
"""Validation size should be between 0 and """
f'''{len(snake_case__ )}. Received: {validation_size}.'''
)
raise ValueError(snake_case__ )
__snake_case :Optional[int] = train_images[:validation_size]
__snake_case :Optional[int] = train_labels[:validation_size]
__snake_case :List[str] = train_images[validation_size:]
__snake_case :Optional[Any] = train_labels[validation_size:]
__snake_case :Optional[Any] = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
__snake_case :Optional[int] = _DataSet(snake_case__ ,snake_case__ ,**snake_case__ )
__snake_case :int = _DataSet(snake_case__ ,snake_case__ ,**snake_case__ )
__snake_case :Tuple = _DataSet(snake_case__ ,snake_case__ ,**snake_case__ )
return _Datasets(train=snake_case__ ,validation=snake_case__ ,test=snake_case__ )
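# Hypothetical usage sketch (added comment; names assumed from the original TF helper):
#   datasets = read_data_sets("/tmp/mnist", one_hot=True, validation_size=5000)
#   images, labels = datasets.train.next_batch(100)   # images: (100, 784), labels: (100, 10)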
| 455 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__UpperCAmelCase = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def _snake_case ( A ) -> Optional[Any]:
with open(A , '''r''' ) as f:
lowerCAmelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ )
lowerCAmelCase__ = dict(enumerate(self.all_tokens ) )
lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowerCAmelCase__ = unk_token
lowerCAmelCase__ = cls_token
lowerCAmelCase__ = pad_token
lowerCAmelCase__ = mask_token
lowerCAmelCase__ = eos_token
lowerCAmelCase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
return text.split()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict:
return len(self._id_to_token )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {token: i for i, token in enumerate(self.all_tokens )}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
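# Illustrative layout (added comment): a single sequence comes back as
#   [<cls>] + token_ids + [<eos>]
# and, per the branch above, a pair comes back with the second sequence's ids followed by
# another <eos> appended after the first.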
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase_ ) + [1]
return mask
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int:
return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ ) | 90 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class _UpperCamelCase ( a__ ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str=0.01 , SCREAMING_SNAKE_CASE_ : Any=1_0_0_0 ):
_a = p_stop
_a = max_length
def __iter__( self : List[Any] ):
_a = 0
_a = False
while not stop and count < self.max_length:
yield count
count += 1
_a = random.random() < self.p_stop
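# Note (added comment): each pass over this dataset yields a stream of unpredictable length,
# since after every item it stops with probability p_stop (capped at max_length items); this
# is exactly the uneven-stream case the IterableDatasetShard tests below have to handle.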
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Optional[int]=True ):
_a = [
BatchSamplerShard(lowerCamelCase_ , 2 , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
for i in range(2 )
]
_a = [list(lowerCamelCase_ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(lowerCamelCase_ ) for shard in batch_sampler_shards] , [len(lowerCamelCase_ ) for e in expected] )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def _UpperCAmelCase ( self : List[Any] ):
# Check the shards when the dataset is a round multiple of total batch size.
_a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
_a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
_a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but the number of
# batches is a multiple of num_processes.
_a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
_a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size and the number of
# batches is not a multiple of num_processes.
_a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
_a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ )
def _UpperCAmelCase ( self : Union[str, Any] ):
# Check the shards when the dataset is a round multiple of batch size.
_a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
_a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [0, 1]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [1, 2]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ )
def _UpperCAmelCase ( self : List[str] ):
# Check the shards when the dataset is a round multiple of total batch size.
_a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1, 2_2, 2_3]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_4 ) , batch_size=3 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_1 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but the number of
# batches is a multiple of num_processes.
_a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9, 2_0]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7], [2_1]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size and the number of
# batches is not a multiple of num_processes.
_a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4], [1_8, 1_9]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_0 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1, 2], [6, 7, 8], [1_2, 1_3, 1_4]],
[[3, 4, 5], [9, 1_0, 1_1], [1_5, 1_6, 1_7]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
_a = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCamelCase_ )
_a = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , even_batches=lowerCamelCase_ )
def _UpperCAmelCase ( self : Union[str, Any] ):
# Check the shards when the dataset is a round multiple of batch size.
_a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9], [2_2, 2_3]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_4 ) , batch_size=4 , drop_last=lowerCamelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
_a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0, 2_1]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7], [2_0]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
_a = BatchSampler(range(2_1 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [
[[0, 1], [4, 5], [8, 9], [1_2, 1_3], [1_6, 1_7]],
[[2, 3], [6, 7], [1_0, 1_1], [1_4, 1_5], [1_8, 1_9]],
]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
# Check the shards when the dataset is very small.
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
_a = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = [[], []]
self.check_batch_sampler_shards(lowerCamelCase_ , lowerCamelCase_ , split_batches=lowerCamelCase_ , even_batches=lowerCamelCase_ )
def _UpperCAmelCase ( self : Any ):
_a = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 1_0, 1_1], [1_2, 1_3]]
_a = [BatchSamplerShard(lowerCamelCase_ , 2 , lowerCamelCase_ , even_batches=lowerCamelCase_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [1_2, 1_3]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 1_0, 1_1]] )
def _UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Tuple=False ):
random.seed(lowerCamelCase_ )
_a = list(lowerCamelCase_ )
_a = [
IterableDatasetShard(
lowerCamelCase_ , batch_size=lowerCamelCase_ , drop_last=lowerCamelCase_ , num_processes=lowerCamelCase_ , process_index=lowerCamelCase_ , split_batches=lowerCamelCase_ , )
for i in range(lowerCamelCase_ )
]
_a = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCamelCase_ )
iterable_dataset_lists.append(list(lowerCamelCase_ ) )
_a = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_a = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
self.assertTrue(len(lowerCamelCase_ ) % shard_batch_size == 0 )
_a = []
for idx in range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCamelCase_ ) < len(lowerCamelCase_ ):
reference += reference
self.assertListEqual(lowerCamelCase_ , reference[: len(lowerCamelCase_ )] )
def _UpperCAmelCase ( self : str ):
_a = 4_2
_a = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
# Edge case with a very small dataset
_a = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
self.check_iterable_dataset_shards(lowerCamelCase_ , lowerCamelCase_ , batch_size=4 , drop_last=lowerCamelCase_ , split_batches=lowerCamelCase_ )
def _UpperCAmelCase ( self : int ):
_a = BatchSampler(range(1_6 ) , batch_size=4 , drop_last=lowerCamelCase_ )
_a = SkipBatchSampler(lowerCamelCase_ , 2 )
self.assertListEqual(list(lowerCamelCase_ ) , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def _UpperCAmelCase ( self : Union[str, Any] ):
_a = SkipDataLoader(list(range(1_6 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def _UpperCAmelCase ( self : Dict ):
_a = DataLoader(list(range(1_6 ) ) , batch_size=4 )
_a = skip_first_batches(lowerCamelCase_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 1_0, 1_1], [1_2, 1_3, 1_4, 1_5]] )
def _UpperCAmelCase ( self : Optional[int] ):
_a = DataLoaderShard(list(range(1_6 ) ) , batch_size=4 )
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def _UpperCAmelCase ( self : int ):
Accelerator()
_a = DataLoaderDispatcher(range(1_6 ) , batch_size=4 )
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCamelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 562 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = AltDiffusionPipeline
lowercase__ : Dict = TEXT_TO_IMAGE_PARAMS
lowercase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
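# Note on the generator branch above (added comment, historical assumption): "mps" devices
# did not support device-local torch.Generator objects, so the global torch.manual_seed
# path is taken there while every other device gets a seeded per-device Generator.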
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 90 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
a_ = ['''small''', '''medium''', '''large''']
a_ = '''lm_head.decoder.weight'''
a_ = '''lm_head.weight'''
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
snake_case_ : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE__ )
snake_case_ : List[str] = d.pop(SCREAMING_SNAKE_CASE__ )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
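# Sketch of the rename on a toy state dict (added comment; in the original script the popped
# tensor is reassigned as d[NEW_KEY] = d.pop(OLD_KEY)):
#   {"lm_head.decoder.weight": W, ...}  ->  {"lm_head.weight": W, ...}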
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
a_ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
a_ = os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
a_ = F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 480 |
'''simple docstring'''
def _snake_case ( A , A ) -> int:
return x if y == 0 else greatest_common_divisor(A , x % y )
def _snake_case ( A , A ) -> int:
return (x * y) // greatest_common_divisor(A , A )
def _snake_case ( A = 20 ) -> int:
lowerCAmelCase__ = 1
for i in range(1 , n + 1 ):
lowerCAmelCase__ = lcm(A , A )
return g
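# Worked example for the fold above (added comment): over 1..10 the running lcm evolves as
# 1, 2, 6, 12, 60, 60, 420, 840, 2520, 2520, so solution(10) == 2520; for the default
# n = 20 the answer is 232792560.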
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 0 |
"""simple docstring"""
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__A : str = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def lowercase ( UpperCamelCase : List[Any] , UpperCamelCase : int=None , UpperCamelCase : Any=None , UpperCamelCase : Optional[Any]=None ):
"""simple docstring"""
A__ : Union[str, Any] =True
while ask_again:
A__ : List[str] =input(UpperCamelCase )
try:
if default is not None and len(UpperCamelCase ) == 0:
return default
return convert_value(UpperCamelCase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(UpperCamelCase )
def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int]=[] , UpperCamelCase : List[Any]=None , UpperCamelCase : Tuple=0 ):
"""simple docstring"""
A__ : int =BulletMenu(UpperCamelCase , UpperCamelCase )
A__ : int =menu.run(default_choice=UpperCamelCase )
return convert_value(UpperCamelCase ) if convert_value is not None else result
def lowercase ( UpperCamelCase : Dict ):
"""simple docstring"""
A__ : Any =int(UpperCamelCase )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
A__ : Dict =int(UpperCamelCase )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
A__ : List[str] =int(UpperCamelCase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowercase ( UpperCamelCase : Tuple ):
"""simple docstring"""
A__ : List[Any] =int(UpperCamelCase )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def lowercase ( UpperCamelCase : Tuple ):
"""simple docstring"""
A__ : Any =int(UpperCamelCase )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def lowercase ( UpperCamelCase : Tuple ):
"""simple docstring"""
return {"yes": True, "no": False}[value.lower()]
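# Example behaviour (added comment): the lookup is case-insensitive on exactly "yes"/"no",
# so "YES" -> True and "no" -> False, while any other string raises a KeyError.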
class __lowerCAmelCase ( argparse.RawDescriptionHelpFormatter):
'''simple docstring'''
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ):
A__ : Union[str, Any] =super()._format_usage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
A__ : Optional[int] =usage.replace("<command> [<args>] " , "" )
return usage
| 656 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
__UpperCAmelCase = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _snake_case ( A , A=None , A=None , A=None ) -> Union[str, Any]:
lowerCAmelCase__ = True
while ask_again:
lowerCAmelCase__ = input(A )
try:
if default is not None and len(A ) == 0:
return default
return convert_value(A ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(A )
def _snake_case ( A , A=[] , A=None , A=0 ) -> List[Any]:
lowerCAmelCase__ = BulletMenu(A , A )
lowerCAmelCase__ = menu.run(default_choice=A )
return convert_value(A ) if convert_value is not None else result
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value] )
def _snake_case ( A ) -> str:
lowerCAmelCase__ = int(A )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _snake_case ( A ) -> Tuple:
lowerCAmelCase__ = int(A )
return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value] )
def _snake_case ( A ) -> Union[str, Any]:
lowerCAmelCase__ = int(A )
return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value] )
def _snake_case ( A ) -> List[str]:
return {"yes": True, "no": False}[value.lower()]
class a__ ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = super()._format_usage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = usage.replace('''<command> [<args>] ''' , '''''' )
return usage | 90 | 0 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__A = logging.getLogger(__name__)
if __name__ == "__main__":
__A = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=3_05_22, type=int)
__A = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, "rb") as fp:
__A = pickle.load(fp)
logger.info("Counting occurrences for MLM.")
__A = Counter()
for tk_ids in data:
counter.update(tk_ids)
__A = [0] * args.vocab_size
for k, v in counter.items():
__A = v
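# Intent of the loop above (added comment): in the original script this reads counts[k] = v,
# copying each token id's occurrence count into the dense vocab_size-long list before pickling.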
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 68 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : torch.FloatTensor
class a__ ( a__ , a__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCamelCase_ = 3 , lowerCamelCase_ = 3 , lowerCamelCase_ = ("DownEncoderBlock2D",) , lowerCamelCase_ = ("UpDecoderBlock2D",) , lowerCamelCase_ = (64,) , lowerCamelCase_ = 1 , lowerCamelCase_ = "silu" , lowerCamelCase_ = 3 , lowerCamelCase_ = 32 , lowerCamelCase_ = 2_56 , lowerCamelCase_ = 32 , lowerCamelCase_ = None , lowerCamelCase_ = 0.18_215 , lowerCamelCase_ = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase__ = Encoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , down_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , double_z=lowerCamelCase_ , )
lowerCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
lowerCAmelCase__ = VectorQuantizer(lowerCamelCase_ , lowerCamelCase_ , beta=0.25 , remap=lowerCamelCase_ , sane_index_shape=lowerCamelCase_ )
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , 1 )
# pass init params to Decoder
lowerCAmelCase__ = Decoder(
in_channels=lowerCamelCase_ , out_channels=lowerCamelCase_ , up_block_types=lowerCamelCase_ , block_out_channels=lowerCamelCase_ , layers_per_block=lowerCamelCase_ , act_fn=lowerCamelCase_ , norm_num_groups=lowerCamelCase_ , norm_type=lowerCamelCase_ , )
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> VQEncoderOutput:
lowerCAmelCase__ = self.encoder(lowerCamelCase_ )
lowerCAmelCase__ = self.quant_conv(lowerCamelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCamelCase_ )
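# Flow sketch (added comment): encode() maps pixels to continuous latents via encoder +
# quant_conv; decode() optionally snaps those latents to their nearest codebook entries
# through the VectorQuantizer, then runs post_quant_conv + decoder back to pixel space.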
@apply_forward_hook
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.quantize(lowerCamelCase_ )
else:
lowerCAmelCase__ = h
lowerCAmelCase__ = self.post_quant_conv(lowerCamelCase_ )
lowerCAmelCase__ = self.decoder(lowerCamelCase_ , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase__ = sample
lowerCAmelCase__ = self.encode(lowerCamelCase_ ).latents
lowerCAmelCase__ = self.decode(lowerCamelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCamelCase_ ) | 90 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : Optional[Any] ={
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =["ConditionalDetrFeatureExtractor"]
__lowerCAmelCase : Dict =["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : int =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 440 |
'''simple docstring'''
from __future__ import annotations
__UpperCAmelCase = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _snake_case ( A , A , A , A ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _snake_case ( A ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _snake_case ( A ) -> Matrix | None:
if location := find_empty_location(A ):
lowerCAmelCase__ , lowerCAmelCase__ = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(A , A , A , A ):
lowerCAmelCase__ = digit
if sudoku(A ) is not None:
return grid
lowerCAmelCase__ = 0
return None
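# How the backtracking above proceeds (added comment): for the first empty cell it tries the
# digits 1..9; each digit that passes is_safe() is written into the grid and the solver
# recurses on the next empty cell, resetting the cell to 0 whenever the recursion dead-ends.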
def _snake_case ( A ) -> None:
for row in grid:
for cell in row:
print(A , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
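# Hedged illustration (added; not part of the original tokenizer file): the
# segment-id scheme that `create_token_type_ids_from_sequences` implements,
# shown as a standalone function over plain ID lists. The sentinel IDs are
# invented placeholders, not ConvBERT's real vocabulary IDs.
def _demo_token_type_ids(ids_a, ids_b=None, cls_id=-1, sep_id=-2):
    # [CLS] A [SEP] gets segment 0; an optional B [SEP] gets segment 1.
    first = [cls_id] + ids_a + [sep_id]
    if ids_b is None:
        return [0] * len(first)
    return [0] * len(first) + [1] * len(ids_b + [sep_id])

# _demo_token_type_ids([11, 12], [21]) -> [0, 0, 0, 0, 1, 1]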
| 507 |
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a word sorted by its letters."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word found in the word list."""
    return word_by_signature[signature(my_word)]
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
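# Hedged self-contained demo (added): the same signature-bucketing idea on a
# hardcoded word list, so the grouping logic can be tried without words.txt.
if __name__ == "__main__":
    import collections as _collections

    _demo_words = ["listen", "silent", "enlist", "google", "banana"]
    _buckets = _collections.defaultdict(list)
    for _w in _demo_words:
        _buckets["".join(sorted(_w))].append(_w)
    print({k: v for k, v in _buckets.items() if len(v) > 1})
    # expected: {'eilnst': ['listen', 'silent', 'enlist']}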
| 693 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of the list (1-indexed)."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
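# Hedged usage sketch (added): quickselect on a small list of distinct values.
# Equal-to-pivot duplicates are dropped by the two comprehensions above, so
# the helper assumes distinct elements.
def _demo_kth() -> None:
    sample = [3, 1, 4, 9, 5]
    assert kth_number(sample, 1) == 1
    assert kth_number(sample, 3) == 4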
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 119 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape an Amazon.in search results page and collect product data."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = ""
        product_mrp = ""
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    product = "headphones"
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
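# Hedged helper sketch (added): the discount arithmetic from the scraper above
# factored into a pure function that is easy to test offline. The helper name
# is invented for this example.
def _demo_discount_percent(mrp_text: str, price_text: str) -> float:
    """E.g. _demo_discount_percent("₹1,000", "₹750") == 25.0"""
    mrp = float(mrp_text.strip("₹").replace(",", ""))
    price = float(price_text.strip("₹").replace(",", ""))
    return (mrp - price) / mrp * 100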
| 262 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and lazy deletion."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})" | 90 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_python(self):
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
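# Hedged reference sketch (added): the two GELU variants the tests above
# compare, written out standalone. `gelu_exact` matches the erf-based
# definition behind `gelu_python`; `gelu_tanh_approx` matches the "gelu_new"
# tanh approximation. Both function names are invented for this example.
import math


def gelu_exact(x):
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def gelu_tanh_approx(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))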
| 673 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
pipeline.save_pretrained(A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 90 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Adapt the sequence to the block size: truncate if longer, right-pad if shorter."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines and flatten them into single token sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Alternating segment embeddings: the segment flips at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
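# Hedged usage sketch (added): `process_story` on a toy CNN/DailyMail-style
# string, assuming the helpers defined above are in scope.
def _demo_process_story() -> None:
    raw = "First sentence.\nSecond sentence\n@highlight\nA summary line"
    story, summary = process_story(raw)
    assert story == ["First sentence.", "Second sentence."]
    assert summary == ["A summary line."]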
| 455 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d | 90 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                'The config can simply be saved and uploaded again to be fixed.' )
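if __name__ == "__main__":
    # Hedged usage sketch (added): `attribute_map` above aliases `hidden_size`
    # to `d_model`, which this demo checks. Assumes transformers is installed.
    demo_config = MvpConfig(d_model=512, encoder_layers=6, decoder_layers=6)
    assert demo_config.hidden_size == 512
    print(demo_config.model_type, demo_config.hidden_size)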
| 562 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images

        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images

        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images

        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 | 90 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xlnet''': ['''XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 480 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Check whether n uses each of the digits 1-9 exactly once."""
    s = str(n)
    return len(s) == 9 and set(s) == set('123456789')


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 0 |
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """
    Return the numerator of the fraction immediately to the left of
    numerator/denominator among all fractions with denominator <= limit.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
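# Hedged cross-check (added): a brute-force version over `fractions.Fraction`
# for small limits, useful to sanity-check `solution` above. The helper name
# is invented for this example.
def _brute_force_left_neighbor(numerator: int = 3, denominator: int = 7, limit: int = 8):
    from fractions import Fraction

    target = Fraction(numerator, denominator)
    best = Fraction(0, 1)
    for q in range(1, limit + 1):
        for p in range(1, q + 1):
            f = Fraction(p, q)
            if best < f < target:
                best = f
    return best

# _brute_force_left_neighbor(limit=8) == Fraction(2, 5), matching solution(3, 7, 8) == 2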
| 656 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = 'tiny-wmt19-en-ru'
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 90 | 0 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
@property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model
@property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        return CLIPVisionModelWithProjection(config)
@property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073], image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711], resample=3, size=224, )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
@skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 68 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError('CUDA out of memory.')


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [1_28, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function('hello')
        self.assertListEqual(batch_sizes, [1_28, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, 'hello'])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn('No executable batch size found, reached zero.', cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=1_28)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(1_28, 'hello', 'world')
        self.assertIn('Batch size was passed into `f`', cm.exception.args[0])
        self.assertIn('`f(arg1=\'hello\', arg2=\'world\')', cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError('Oops, we had an error!')

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn('Oops, we had an error!', cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory) | 90 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(F"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
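# Hedged illustration (added; not part of the transformers file): the heart of
# `postprocess` above as a standalone torch-only function over raw logits.
# The helper name is invented for this example.
def _demo_topk_labels(logits, id2label, top_k=5):
    probs = logits.softmax(-1)
    scores, ids = probs.topk(min(top_k, probs.numel()))
    return [{"score": s.item(), "label": id2label[i.item()]} for s, i in zip(scores, ids)]

# _demo_topk_labels(torch.tensor([0.1, 2.0, -1.0]), {0: "cat", 1: "dog", 2: "bird"}, top_k=2)
# -> "dog" first, then "cat"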
| 440 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''' )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
# add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase__ = []
lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
lowerCAmelCase__ = get_parameter_device(A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A )
# convert param to the right dtype
lowerCAmelCase__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCAmelCase__ = getattr(A , A , A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A ):
param.to(A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCAmelCase__ = replace_with_bnb_layers(
A , A , modules_to_not_convert=A )
lowerCAmelCase__ = get_quantized_model_device_map(
A , A , A , max_memory=A , no_split_module_classes=A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase__ = True
lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(A , device_map=A , offload_dir=A )
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase__ = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCAmelCase__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase__ = {}
lowerCAmelCase__ = special_dtypes
lowerCAmelCase__ = no_split_module_classes
lowerCAmelCase__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase__ = get_balanced_memory(
A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , )
lowerCAmelCase__ = max_memory
lowerCAmelCase__ = infer_auto_device_map(A , **A )
if isinstance(A , A ):
# check if don't have any quantized module on the cpu
lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
if modules_to_not_convert is None:
lowerCAmelCase__ = []
lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load a bnb module with empty weights and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can't be both False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ):
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ):
    return next(parameter.parameters() ).device
def quantize_and_offload_8bit( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('''.''' )
            for split in splits[:-1]:
                new_module = getattr(module , split )
                if new_module is None:
                    raise ValueError(F"""{module} has no attribute {split}.""" )
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
        if hasattr(module._parameters[tensor_name] , '''SCB''' ):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) ) | 90 | 0 |
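The row above replaces `nn.Linear` modules with bitsandbytes quantized layers by walking the module tree. Below is a minimal, self-contained sketch of that traversal; the helper name is ours, it assumes `bitsandbytes` is installed, and unlike the snippet it handles no skip lists or offloading.

import torch.nn as nn
import bitsandbytes as bnb

def replace_linear_with_8bit(model: nn.Module, threshold: float = 6.0) -> nn.Module:
    for name, child in model.named_children():
        if isinstance(child, nn.Linear):
            new_layer = bnb.nn.Linear8bitLt(
                child.in_features,
                child.out_features,
                bias=child.bias is not None,
                has_fp16_weights=False,  # pure int8 inference weights
                threshold=threshold,  # outlier threshold used by LLM.int8()
            )
            new_layer.weight.data = child.weight.data  # quantized when moved to CUDA
            setattr(model, name, new_layer)
        else:
            replace_linear_with_8bit(child, threshold)  # recurse into submodules
    return model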
'''simple docstring'''
from math import isqrt
def is_prime(number: int ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution(max_prime: int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 507 |
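The code column of the row above counts primes below a bound that are differences of consecutive cubes: (n + 1)^3 - n^3 = 3n^2 + 3n + 1, so successive candidates differ by 6(n + 1), which is the `prime_candidate += 6 * cube_index` update. A quick standalone check of that recurrence:

candidate, index = 7, 1
for n in range(1, 10):
    assert candidate == (n + 1) ** 3 - n**3  # difference of consecutive cubes
    index += 1
    candidate += 6 * index
print("recurrence verified for n = 1..9")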
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def heun(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        # predictor step (explicit Euler)
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        # corrector step (trapezoidal rule, averaging the slopes at both ends)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y[k + 1] ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 0 |
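A usage sketch for the Heun integrator restored above (assuming the function is in scope under the name `heun`): integrating y' = y from y(0) = 1 should approach e at x = 1.

import math

def f(x: float, y: float) -> float:
    return y  # dy/dx = y has the exact solution y = exp(x)

ys = heun(f, 1.0, 0.0, 0.1, 1.0)  # y0 = 1, x0 = 0, h = 0.1, integrate to x = 1
print(ys[-1], math.e)  # the final value should be close to e ~ 2.71828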
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
mock_tokenizer_config = {'target_lang': 'fi', 'source_lang': 'en'}
zh_code = '>>zh<<'
ORG_NAME = 'Helsinki-NLP/'
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
@require_sentencepiece
class MarianTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def __lowerCAmelCase ( self ) -> List[str]:
super().setUp()
__SCREAMING_SNAKE_CASE = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
__SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_ ) ) ) )
__SCREAMING_SNAKE_CASE = Path(self.tmpdirname )
save_json(lowerCamelCase_, save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(lowerCamelCase_, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(lowerCamelCase_, save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(lowerCamelCase_, save_dir / VOCAB_FILES_NAMES["target_spm"] )
__SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self, **_a ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname, **lowerCamelCase_ )
def __lowerCAmelCase ( self, _a ) -> Tuple:
return (
"This is a test",
"This is a test",
)
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = "</s>"
__SCREAMING_SNAKE_CASE = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ), lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ), lowerCamelCase_ )
def __lowerCAmelCase ( self ) -> int:
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "</s>" )
self.assertEqual(vocab_keys[1], "<unk>" )
self.assertEqual(vocab_keys[-1], "<pad>" )
self.assertEqual(len(lowerCamelCase_ ), 9 )
def __lowerCAmelCase ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size, 9 )
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained(f'''{ORG_NAME}opus-mt-en-de''' )
__SCREAMING_SNAKE_CASE = en_de_tokenizer(["I am a small frog"], return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
__SCREAMING_SNAKE_CASE = [38, 1_21, 14, 6_97, 3_88_48, 0]
self.assertListEqual(lowerCamelCase_, batch.input_ids[0] )
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE = [x.name for x in Path(lowerCamelCase_ ).glob("*" )]
self.assertIn("source.spm", lowerCamelCase_ )
MarianTokenizer.from_pretrained(lowerCamelCase_ )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = tok(
["I am a small frog" * 10_00, "I am a small frog"], padding=lowerCamelCase_, truncation=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual(batch.input_ids.shape, (2, 5_12) )
def __lowerCAmelCase ( self ) -> int:
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = tok(["I am a tiny frog", "I am a small frog"], padding=lowerCamelCase_, return_tensors=lowerCamelCase_ )
self.assertIsInstance(lowerCamelCase_, lowerCamelCase_ )
self.assertEqual(batch_smaller.input_ids.shape, (2, 10) )
@slow
def __lowerCAmelCase ( self ) -> Tuple:
# fmt: off
__SCREAMING_SNAKE_CASE = {"input_ids": [[4_34_95, 4_62, 20, 4_21_64, 13_69, 52, 4_64, 1_32, 17_03, 4_92, 13, 74_91, 3_89_99, 6, 8, 4_64, 1_32, 17_03, 4_92, 13, 46_69, 3_78_67, 13, 75_25, 27, 15_93, 9_88, 13, 3_39_72, 70_29, 6, 20, 82_51, 3_83, 2, 2_70, 58_66, 37_88, 2, 23_53, 82_51, 1_23_38, 2, 1_39_58, 3_87, 2, 36_29, 69_53, 1_88, 29_00, 2, 1_39_58, 80_11, 1_15_01, 23, 84_60, 40_73, 3_40_09, 20, 4_35, 1_14_39, 27, 8, 84_60, 40_73, 60_04, 20, 99_88, 3_75, 27, 33, 2_66, 19_45, 10_76, 13_50, 3_78_67, 32_88, 5, 5_77, 10_76, 43_74, 8, 50_82, 5, 2_64_53, 2_57, 5_56, 4_03, 2, 2_42, 1_32, 3_83, 3_16, 4_92, 8, 1_07_67, 6, 3_16, 3_04, 42_39, 3, 0], [1_48, 1_57_22, 19, 18_39, 12, 13_50, 13, 2_23_27, 50_82, 54_18, 4_75_67, 3_59_38, 59, 3_18, 1_95_52, 1_08, 21_83, 54, 1_49_76, 48_35, 32, 5_47, 11_14, 8, 3_15, 24_17, 5, 92, 1_90_88, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00], [36, 63_95, 1_25_70, 3_91_47, 1_15_97, 6, 2_66, 4, 4_54_05, 72_96, 3, 0, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00, 5_81_00]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_, model_name="Helsinki-NLP/opus-mt-en-de", revision="1a8c2263da11e68e50938f97e10cd57820bd504c", decode_kwargs={"use_source_tokenizer": True}, )
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
__SCREAMING_SNAKE_CASE = "Tämä on testi"
__SCREAMING_SNAKE_CASE = "This is a test"
__SCREAMING_SNAKE_CASE = [76, 7, 20_47, 2]
__SCREAMING_SNAKE_CASE = [69, 12, 11, 9_40, 2]
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase_ ).input_ids
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
__SCREAMING_SNAKE_CASE = tokenizer(text_target=lowerCamelCase_ ).input_ids
self.assertListEqual(lowerCamelCase_, lowerCamelCase_ )
__SCREAMING_SNAKE_CASE = tokenizer.decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_, lowerCamelCase_ )
| 693 |
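The tests above exercise MarianTokenizer against a tiny SentencePiece fixture. A minimal round-trip sketch with a public checkpoint, assuming network access to the Hugging Face Hub:

from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
ids = tok("I am a small frog").input_ids  # ends with the </s> id (0)
print(tok.convert_ids_to_tokens(ids))
print(tok.decode(ids, skip_special_tokens=True))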
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict:
lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ )
lowerCAmelCase__ = length
lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Any:
return self.length
def __getitem__( self , lowerCamelCase_ ) -> List[str]:
return {"x": self.x[i], "y": self.y[i]}
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a + self.b
def mocked_dataloaders( accelerator , batch_size = 16 ) -> Any:
    from datasets import load_dataset
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''' , data_files=data_files )
    label_list = datasets['''train'''].unique('''label''' )
    label_to_id = {v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None , padding='''max_length''' )
        if "label" in examples:
            outputs['''labels'''] = [label_to_id[l] for l in examples['''label''']]
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''sentence1''', '''sentence2''', '''label'''] , )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader | 90 | 0 |
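The collate function above pads to a fixed 128 tokens on TPU (static shapes avoid XLA recompilation) and to the longest sequence in the batch elsewhere. A toy comparison of the two padding modes, independent of Accelerate; the sentences are made up.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
features = [tokenizer(t) for t in ["short", "a somewhat longer sentence"]]
static = tokenizer.pad(features, padding="max_length", max_length=16, return_tensors="pt")
dynamic = tokenizer.pad(features, padding="longest", return_tensors="pt")
print(static["input_ids"].shape)   # torch.Size([2, 16]): fixed shape, TPU-friendly
print(dynamic["input_ids"].shape)  # torch.Size([2, <longest>]): less padding waste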
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'''vocab_file''': '''sentencepiece.model'''}
UpperCAmelCase = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
}
UpperCAmelCase = {
'''google/rembert''': 256,
}
class RemBertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any]=False , __lowercase : Tuple=True , __lowercase : Optional[Any]=True , __lowercase : Tuple="[CLS]" , __lowercase : int="[SEP]" , __lowercase : int="[UNK]" , __lowercase : str="[SEP]" , __lowercase : Dict="[PAD]" , __lowercase : List[str]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : int , ):
"""simple docstring"""
super().__init__(
do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
__lowercase =do_lower_case
__lowercase =remove_space
__lowercase =keep_accents
__lowercase =vocab_file
__lowercase =spm.SentencePieceProcessor()
self.sp_model.Load(lowerCamelCase_ )
@property
def snake_case ( self : Any ):
"""simple docstring"""
return len(self.sp_model )
def snake_case ( self : Dict ):
"""simple docstring"""
__lowercase ={self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
__lowercase =self.__dict__.copy()
__lowercase =None
return state
def __setstate__( self : Optional[Any] , __lowercase : Optional[int] ):
"""simple docstring"""
__lowercase =d
__lowercase =spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def snake_case ( self : int , __lowercase : Union[str, Any] , __lowercase : Tuple=False ):
"""simple docstring"""
__lowercase =self.sp_model.EncodeAsPieces(lowerCamelCase_ )
return pieces
def snake_case ( self : Union[str, Any] , __lowercase : Optional[Any] ):
"""simple docstring"""
return self.sp_model.PieceToId(lowerCamelCase_ )
def snake_case ( self : List[str] , __lowercase : List[Any] ):
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCamelCase_ )
def snake_case ( self : int , __lowercase : Optional[int] ):
"""simple docstring"""
__lowercase =self.sp_model.decode_pieces(lowerCamelCase_ )
return out_string
def snake_case ( self : List[Any] , __lowercase : Union[str, Any] , __lowercase : Dict = None ):
"""simple docstring"""
__lowercase =[self.sep_token_id]
__lowercase =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case ( self : int , __lowercase : str , __lowercase : List[str] = None , __lowercase : Tuple = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
def snake_case ( self : List[str] , __lowercase : Any , __lowercase : str = None ):
"""simple docstring"""
__lowercase =[self.sep_token_id]
__lowercase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case ( self : str , __lowercase : Optional[Any] , __lowercase : str = None ):
"""simple docstring"""
if not os.path.isdir(lowerCamelCase_ ):
logger.error('Vocabulary path ({}) should be a directory'.format(lowerCamelCase_ ) )
return
__lowercase =os.path.join(
lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,)
| 119 |
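The `build_inputs_with_special_tokens`-style methods in the row above implement the classic BERT layout: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair. A dependency-free sketch of that layout (the ids are made up):

def build_inputs(ids_a, ids_b=None, cls_id=0, sep_id=1):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

print(build_inputs([5, 6]))        # [0, 5, 6, 1]
print(build_inputs([5, 6], [7]))   # [0, 5, 6, 1, 7, 1]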
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check( pkg , hint=None ) -> None:
    require_version(deps[pkg] , hint ) | 90 | 0 |
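A standalone sketch of the same runtime version gate, using `importlib.metadata` (Python 3.8+) and `packaging` directly rather than transformers' internal `require_version` helper:

from importlib.metadata import version
from packaging.version import parse

def require_at_least(pkg: str, minimum: str) -> None:
    installed = parse(version(pkg))
    if installed < parse(minimum):
        raise ImportError(f"{pkg}>={minimum} is required, found {installed}")

require_at_least("numpy", "1.17")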
'''simple docstring'''
speed_chart = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.6_0_9_3_4_4,
"knot": 1.8_5_2,
}
speed_chart_inverse = {
"km/h": 1.0,
"m/s": 0.2_7_7_7_7_7_7_7_8,
"mph": 0.6_2_1_3_7_1_1_9_2,
"knot": 0.5_3_9_9_5_6_8_0_3,
}
def convert_speed( speed: float , unit_from: str , unit_to: str ) -> float:
    """simple docstring"""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
            f"""Valid values are: {', '.join(speed_chart_inverse )}"""
        )
        raise ValueError(msg )
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 262 |
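Usage sketch for the converter above (assuming the restored name `convert_speed`); both charts pivot through km/h, so a conversion is one multiply into km/h and one multiply out:

print(convert_speed(100, "km/h", "m/s"))  # 27.778
print(convert_speed(100, "km/h", "mph"))  # 62.137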
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        prefix = '''vilt.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    config = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = '''huggingface/label-files'''
        filename = '''vqa2-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: '''False''', 1: '''True'''}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError('''Unknown model type''' )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''state_dict''']
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=384 )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        image2 = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=True ).raw )
        text = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        encoding_1 = processor(image1 , text , return_tensors='''pt''' )
        encoding_2 = processor(image2 , text , return_tensors='''pt''' )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
    else:
        image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=True ).raw )
        if mlm_model:
            text = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            text = '''How many cats are there?'''
        encoding = processor(image , text , return_tensors='''pt''' )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1e-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__UpperCAmelCase = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 0 |
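A self-contained sketch of the fused-QKV split that `read_in_q_k_v` performs above: one (3h, h) projection matrix is sliced into query, key and value blocks, and the slices reassemble the original matrix.

import torch

hidden = 4
qkv_weight = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
query = qkv_weight[:hidden, :]            # rows 0..h-1
key = qkv_weight[hidden : 2 * hidden, :]  # rows h..2h-1
value = qkv_weight[-hidden:, :]           # last h rows
assert torch.equal(torch.cat([query, key, value]), qkv_weight)
print("q/k/v slices reassemble the fused projection")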
"""simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = '''naver-clova-ix/donut-base'''
class DonutProcessorTest(unittest.TestCase ):
    def setUp(self ) -> Any:
        """simple docstring"""
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json(self ) -> List[Any]:
        """simple docstring"""
        expected_json = {
            """name""": """John Doe""",
            """age""": """99""",
            """city""": """Atlanta""",
            """state""": """GA""",
            """zip""": """30301""",
            """phone""": """123-4567""",
            """nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
        }
        sequence = (
            """<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
            """<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
            """<s_nicknames><s_nickname>Johnny</s_nickname>"""
            """<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 673 |
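A toy sketch of the tag scheme `token2json` parses above: each JSON key `k` is wrapped in `<s_k>...</s_k>` and list items are joined with `<sep/>`. This is a simplified inverse for illustration, not DonutProcessor's actual implementation.

def json2token(obj) -> str:
    if isinstance(obj, dict):
        return "".join(f"<s_{k}>{json2token(v)}</s_{k}>" for k, v in obj.items())
    if isinstance(obj, list):
        return "<sep/>".join(json2token(v) for v in obj)
    return str(obj)

print(json2token({"name": "John Doe", "nicknames": [{"nickname": "JD"}]}))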
'''simple docstring'''
import re
def indian_phone_validator( phone: str ) -> bool:
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    if match := re.search(pat , phone ):
        return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 90 | 0 |
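Quick checks for the validator above (assuming the restored name `indian_phone_validator`): the pattern allows an optional `+91` prefix, an optional `0` or `91`, then ten digits starting with 7, 8 or 9.

for number in ["+918827897895", "9876543210", "1234567890"]:
    print(number, indian_phone_validator(number))
# expected output: True, True, False (a leading 1 never matches [789])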
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    '''simple docstring'''
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ) -> Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
__snake_case :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case :Dict = self.get_config()
return config, pixel_values
def __lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __lowercase ( self , a__ , a__ ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Optional[Any] = FlaxRegNetModel(config=lowerCamelCase_ )
__snake_case :Dict = model(lowerCamelCase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowercase ( self , a__ , a__ ) -> Any:
'''simple docstring'''
__snake_case :int = self.num_labels
__snake_case :Optional[Any] = FlaxRegNetForImageClassification(config=lowerCamelCase_ )
__snake_case :Any = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> int:
'''simple docstring'''
__snake_case :Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case :Optional[int] = config_and_inputs
__snake_case :Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest( FlaxModelTesterMixin , unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
def __lowercase ( self ) -> None:
'''simple docstring'''
__snake_case :List[Any] = FlaxRegNetModelTester(self )
__snake_case :List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
return
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def __lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
__snake_case , __snake_case :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case :Optional[int] = model_class(lowerCamelCase_ )
__snake_case :Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case :Any = [*signature.parameters.keys()]
__snake_case :Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def __lowercase ( self ) -> Any:
'''simple docstring'''
def check_hidden_states_output(a__ , a__ , a__ ):
__snake_case :Optional[int] = model_class(lowerCamelCase_ )
__snake_case :int = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
__snake_case :str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case :Any = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 )
__snake_case , __snake_case :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case :Optional[Any] = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case :Dict = True
check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def __lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case , __snake_case :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__snake_case :Any = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
__snake_case :Dict = model_class(lowerCamelCase_ )
@jax.jit
def model_jitted(a__ , **a__ ):
return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ )
with self.subTest("""JIT Enabled""" ):
__snake_case :List[str] = model_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__snake_case :Optional[Any] = model_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCamelCase ( ):
'''simple docstring'''
__snake_case :Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def __lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :List[Any] = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
__snake_case :Optional[int] = self.default_image_processor
__snake_case :Optional[int] = prepare_img()
__snake_case :Tuple = image_processor(images=lowerCamelCase_ , return_tensors="""np""" )
__snake_case :List[str] = model(**lowerCamelCase_ )
# verify the logits
__snake_case :List[str] = (1, 10_00)
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
__snake_case :Optional[Any] = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
| 455 |
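The JIT-parity test above runs the model once jitted and once under `jax.disable_jit()` and compares the outputs. A standalone sketch of that pattern on a toy function:

import jax
import jax.numpy as jnp

def forward(x):
    return jnp.tanh(x) * 2.0

jitted = jax.jit(forward)
x = jnp.ones((2, 3))
with jax.disable_jit():
    eager_out = forward(x)
assert jitted(x).shape == eager_out.shape
print(jnp.allclose(jitted(x), eager_out))  # True: tracing must not change results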
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
__UpperCAmelCase = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def _snake_case ( A ) -> Optional[Any]:
with open(A , '''r''' ) as f:
lowerCAmelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class EsmTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , lowerCamelCase_ , lowerCamelCase_="<unk>" , lowerCamelCase_="<cls>" , lowerCamelCase_="<pad>" , lowerCamelCase_="<mask>" , lowerCamelCase_="<eos>" , **lowerCamelCase_ , ) -> Tuple:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = load_vocab_file(lowerCamelCase_ )
lowerCAmelCase__ = dict(enumerate(self.all_tokens ) )
lowerCAmelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
lowerCAmelCase__ = unk_token
lowerCAmelCase__ = cls_token
lowerCAmelCase__ = pad_token
lowerCAmelCase__ = mask_token
lowerCAmelCase__ = eos_token
lowerCAmelCase__ = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , **lowerCamelCase_ ) -> Union[str, Any]:
return text.split()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=False ) -> Dict:
return len(self._id_to_token )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return {token: i for i, token in enumerate(self.all_tokens )}
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return self._token_to_id.get(lowerCamelCase_ , self._token_to_id.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str:
return self._id_to_token.get(lowerCamelCase_ , self.unk_token )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None ) -> List[int]:
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
lowerCAmelCase__ = [1] + ([0] * len(lowerCamelCase_ )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowerCamelCase_ ) + [1]
return mask
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = os.path.join(lowerCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowerCamelCase_ , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False ) -> int:
return super()._add_tokens(lowerCamelCase_ , special_tokens=lowerCamelCase_ ) | 90 | 0 |
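The ESM tokenizer in the row above is a plain vocab-file lookup with whitespace splitting. A dependency-free sketch of that pattern (the tiny vocab is made up):

vocab_lines = ["<cls>", "<pad>", "<eos>", "<unk>", "A", "C", "G", "T"]
token_to_id = {tok: i for i, tok in enumerate(vocab_lines)}

def encode(text: str) -> list:
    return [token_to_id.get(tok, token_to_id["<unk>"]) for tok in text.split()]

print(encode("A C G T X"))  # [4, 5, 6, 7, 3] -- "X" falls back to <unk>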
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def _UpperCAmelCase ( self : List[str] ):
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
_a = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
_a = CLIPTextModel(lowerCamelCase_ )
_a = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
_a = 7_7
_a = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=0 ):
if str(lowerCamelCase_ ).startswith('mps' ):
_a = torch.manual_seed(lowerCamelCase_ )
else:
_a = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_a = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _UpperCAmelCase ( self : List[Any] ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def _UpperCAmelCase ( self : Tuple ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _UpperCAmelCase ( self : Any ):
_a = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
torch.manual_seed(0 )
_a = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_a = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
_a = text_encoder
_a = AltDiffusionPipeline(**lowerCamelCase_ )
_a = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_a = self.get_dummy_inputs(lowerCamelCase_ )
_a = 'A photo of an astronaut'
_a = alt_pipe(**lowerCamelCase_ )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self : List[str] ):
_a = 'cpu' # ensure determinism for the device-dependent torch.Generator
_a = self.get_dummy_components()
_a = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
_a = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
_a = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
_a = text_encoder
_a = AltDiffusionPipeline(**lowerCamelCase_ )
_a = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_a = self.get_dummy_inputs(lowerCamelCase_ )
_a = alt_pipe(**lowerCamelCase_ )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_a = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self : Optional[int] ):
# make sure here that pndm scheduler skips prk
_a = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=lowerCamelCase_ )
_a = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_a = 'A painting of a squirrel eating a burger'
_a = torch.manual_seed(0 )
_a = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='np' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_a = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self : Tuple ):
_a = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
_a = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
_a = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_a = 'A painting of a squirrel eating a burger'
_a = torch.manual_seed(0 )
_a = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='numpy' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_a = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 562 |
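The tests above get deterministic sampling by threading a seeded `torch.Generator` through the pipeline. A standalone sketch of why that works:

import torch

gen_a = torch.Generator().manual_seed(0)
gen_b = torch.Generator().manual_seed(0)
noise_a = torch.randn((1, 4, 8, 8), generator=gen_a)
noise_b = torch.randn((1, 4, 8, 8), generator=gen_b)
assert torch.equal(noise_a, noise_b)  # identical seeds -> identical latents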
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowerCAmelCase__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
lowerCAmelCase__ = CLIPTextModel(lowerCamelCase_ )
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> List[str]:
if str(lowerCamelCase_ ).startswith('''mps''' ):
lowerCAmelCase__ = torch.manual_seed(lowerCamelCase_ )
else:
lowerCAmelCase__ = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCAmelCase__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 90 | 0 |
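# Minimal usage sketch for the pipeline exercised above (a sketch, not part of
# the test suite; the model id and prompt come from the slow tests, while the
# GPU device and the output filename are assumptions):
import torch
from diffusers import AltDiffusionPipeline

pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
pipe = pipe.to("cuda")
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    num_inference_steps=20,
).images[0]
image.save("squirrel.png")  # the pipeline returns PIL images by default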
"""simple docstring"""
import os
import sys
a_ = os.path.join(os.path.dirname(__file__), '''src''')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a_ = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
return AutoConfig.from_pretrained(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
"""simple docstring"""
return AutoModel.from_pretrained(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Any ):
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 480 |
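# Consumption sketch for the hubconf-style entry points above (the GitHub repo
# path and checkpoint id are assumptions; point torch.hub at wherever this
# hubconf.py actually lives):
import torch

hub_tokenizer = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
hub_model = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")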
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    # Euclidean algorithm: gcd(x, y)
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    # Project Euler 5: the smallest positive number evenly divisible by 1..n
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 90 | 0 |
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    '''simple docstring'''

    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1E-2
@property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def _UpperCAmelCase ( self : List[str] ):
pass
def _UpperCAmelCase ( self : List[str] ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def _UpperCAmelCase ( self : int ):
# enable deterministic behavior for gradient checkpointing
A__ , A__ : Optional[Any] =self.prepare_init_args_and_inputs_for_common()
A__ : Any =self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
A__ : Dict =model(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
A__ : List[Any] =torch.randn_like(lowerCamelCase_ )
A__ : Optional[Any] =(out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
A__ : Any =self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
A__ : Dict =model_a(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
A__ : Dict =(out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
A__ : Any =dict(model.named_parameters() )
A__ : int =dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _UpperCAmelCase ( self : int ):
A__ , A__ : int =AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowerCamelCase_ )
A__ : Any =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _UpperCAmelCase ( self : Optional[int] ):
A__ : List[str] =AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
A__ : List[Any] =model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
A__ : Any =torch.manual_seed(0 )
else:
A__ : Optional[int] =torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
A__ : List[Any] =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
A__ : Optional[Any] =image.to(lowerCamelCase_ )
with torch.no_grad():
A__ : Any =model(lowerCamelCase_ , sample_posterior=lowerCamelCase_ , generator=lowerCamelCase_ ).sample
A__ : Union[str, Any] =output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
A__ : Any =torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
A__ : List[str] =torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
A__ : Tuple =torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(lowerCamelCase_ , lowerCamelCase_ , rtol=1E-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"""gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy"""
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()
        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _UpperCAmelCase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
A__ : Tuple =self.get_sd_vae_model()
A__ : Any =self.get_sd_image(lowerCamelCase_ )
A__ : Optional[Any] =self.get_generator(lowerCamelCase_ )
with torch.no_grad():
A__ : Optional[int] =model(lowerCamelCase_ , generator=lowerCamelCase_ , sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
A__ : Optional[Any] =sample[-1, -2:, -2:, :2].flatten().float().cpu()
A__ : Any =torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
A__ : Tuple =self.get_sd_vae_model(fpaa=lowerCamelCase_ )
A__ : List[Any] =self.get_sd_image(lowerCamelCase_ , fpaa=lowerCamelCase_ )
A__ : Tuple =self.get_generator(lowerCamelCase_ )
with torch.no_grad():
A__ : str =model(lowerCamelCase_ , generator=lowerCamelCase_ , sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
A__ : str =sample[-1, -2:, :2, -2:].flatten().float().cpu()
A__ : List[Any] =torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
A__ : Dict =self.get_sd_vae_model()
A__ : str =self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
A__ : Union[str, Any] =model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
A__ : List[Any] =sample[-1, -2:, -2:, :2].flatten().float().cpu()
A__ : Tuple =torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def _UpperCAmelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ):
A__ : Any =self.get_sd_vae_model()
A__ : Optional[Any] =self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
A__ : Union[str, Any] =model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
A__ : Dict =sample[-1, -2:, :2, -2:].flatten().cpu()
A__ : int =torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def _UpperCAmelCase ( self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ):
A__ : Tuple =self.get_sd_vae_model(fpaa=lowerCamelCase_ )
A__ : List[str] =self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase_ )
with torch.no_grad():
A__ : Any =model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
A__ : List[Any] =sample[-1, -2:, :2, -2:].flatten().float().cpu()
A__ : List[str] =torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : str ):
A__ : Dict =self.get_sd_vae_model(fpaa=lowerCamelCase_ )
A__ : Optional[Any] =self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase_ )
with torch.no_grad():
A__ : Tuple =model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
A__ : List[str] =model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : int ):
A__ : Tuple =self.get_sd_vae_model()
A__ : Optional[int] =self.get_sd_image(lowerCamelCase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
A__ : Any =model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
A__ : List[str] =model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ):
A__ : str =self.get_sd_vae_model()
A__ : str =self.get_sd_image(lowerCamelCase_ )
A__ : List[Any] =self.get_generator(lowerCamelCase_ )
with torch.no_grad():
A__ : Union[str, Any] =model.encode(lowerCamelCase_ ).latent_dist
A__ : Any =dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
A__ : Optional[Any] =sample[0, -1, -3:, -3:].flatten().cpu()
A__ : Dict =torch.tensor(lowerCamelCase_ )
A__ : Dict =3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(lowerCamelCase_ , lowerCamelCase_ , atol=lowerCamelCase_ )
| 656 |
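# Shape sketch of the encode/decode path probed by the tests above (a sketch;
# the random tensor stands in for a real image batch, and the checkpoint id is
# the default the helpers above use):
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # -> (1, 4, 64, 64), 8x spatial downsampling
    reconstruction = vae.decode(latents).sample       # -> (1, 3, 512, 512)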
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the redundant `<command> [<args>]` text
    argparse generates in the usage line for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 90 | 0 |
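# Sketch of how these prompts compose inside an `accelerate config`-style flow
# (kept as comments because the helpers are interactive; the question text is
# illustrative, not the exact upstream wording):
#
#   compute_environment = _ask_options(
#       "In which compute environment are you running?",
#       ["This machine", "AWS (Amazon SageMaker)"],
#       _convert_compute_environment,
#   )
#   use_cpu = _ask_field(
#       "Do you want to run your training on CPU only? [yes/NO]: ",
#       _convert_yes_no_to_bool,
#       default=False,
#       error_message="Please enter yes or no.",
#   )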
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """simple docstring"""
    # The next row to place a queen in is the number of rows already filled
    row = len(possible_board)

    # If row is equal to the size of the board it means there is a queen in each
    # row of the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain this column value; if it does, two queens share a column
        # (a vertical collision). Then we apply the two diagonal formulas:
        #
        # 45º:  y - x = b, i.e. row - col = b
        # 135º: y + x = b, i.e. row + col = b
        #
        # and verify that neither result already exists in its collision list
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to
        # the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If all checks pass we recurse with the updated partial board and collision lists
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """simple docstring"""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
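# Quick check of the search itself (a sketch using the functions above): the
# 4x4 puzzle has exactly two solutions, so the DFS should collect two boards.
boards: list[list[str]] = []
depth_first_search([], [], [], boards, 4)
assert len(boards) == 2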
| 68 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    '''simple docstring'''

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ) -> None:
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, commit_loss, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 90 | 0 |
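# Round-trip sketch for the model above (a sketch; the 32x32 input size is an
# assumption matching the default sample_size, and with a single encoder block
# the spatial resolution is preserved end to end):
import torch

vq_model = VQModel()
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    out = vq_model(x).sample  # encode -> quantize -> decode
print(out.shape)  # expected: torch.Size([1, 3, 32, 32])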
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
def __init__( self :str , lowercase_ :Any , lowercase_ :Optional[Any]=13 , lowercase_ :List[str]=7 , lowercase_ :int=True , lowercase_ :int=True , lowercase_ :Optional[int]=False , lowercase_ :str=True , lowercase_ :List[str]=99 , lowercase_ :List[Any]=32 , lowercase_ :List[str]=5 , lowercase_ :Tuple=4 , lowercase_ :Tuple=37 , lowercase_ :str="gelu" , lowercase_ :str=0.1 , lowercase_ :Union[str, Any]=0.1 , lowercase_ :Any=5_12 , lowercase_ :List[Any]=16 , lowercase_ :Any=2 , lowercase_ :Tuple=0.0_2 , lowercase_ :Tuple=3 , lowercase_ :Dict=4 , lowercase_ :Optional[int]=None , )-> List[str]:
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
def UpperCAmelCase_ ( self :List[Any] )-> Optional[int]:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ = ids_tensor([self.batch_size] , self.num_choices )
A__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self :List[str] )-> Any:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self :Any , lowercase_ :Any , lowercase_ :Tuple , lowercase_ :int , lowercase_ :List[str] , lowercase_ :List[str] , lowercase_ :Optional[int] , lowercase_ :str )-> Dict:
A__ = BioGptModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )
A__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self :Any , lowercase_ :Union[str, Any] , lowercase_ :str , lowercase_ :str , lowercase_ :Union[str, Any] , lowercase_ :Optional[Any] , lowercase_ :List[Any] , lowercase_ :Tuple , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , )-> Optional[Any]:
A__ = BioGptForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self :Tuple , lowercase_ :Dict , lowercase_ :Dict , lowercase_ :Optional[int] , lowercase_ :Any , lowercase_ :Any , *lowercase_ :Tuple )-> str:
A__ = BioGptModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# create attention mask
A__ = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ )
A__ = self.seq_length // 2
A__ = 0
# first forward pass
A__, A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ = ids_tensor((1,) , lowerCamelCase_ ).item() + 1
A__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ = random_other_next_tokens
# append to next input_ids and attn_mask
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCamelCase_ )] , dim=1 , )
# get two different outputs
A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )["last_hidden_state"]
A__ = model(lowerCamelCase_ , past_key_values=lowerCamelCase_ , attention_mask=lowerCamelCase_ )["last_hidden_state"]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -1, random_slice_idx].detach()
A__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :Dict , lowercase_ :List[Any] , lowercase_ :int , lowercase_ :Tuple , lowercase_ :Optional[Any] , *lowercase_ :int )-> Tuple:
A__ = BioGptModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval()
A__ = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_ )
# first forward pass
A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_ )
A__, A__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )["last_hidden_state"]
A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_ )[
"last_hidden_state"
]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Dict , lowercase_ :Dict , lowercase_ :str , lowercase_ :Tuple , lowercase_ :Dict , *lowercase_ :Tuple , lowercase_ :str=False )-> List[Any]:
A__ = BioGptForCausalLM(lowerCamelCase_ )
model.to(lowerCamelCase_ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ = model(lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCAmelCase_ ( self :int , lowercase_ :Any , *lowercase_ :List[Any] )-> Optional[Any]:
A__ = BioGptModel(lowerCamelCase_ )
A__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def UpperCAmelCase_ ( self :int , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Union[str, Any] , lowercase_ :str , lowercase_ :Union[str, Any] , *lowercase_ :str )-> str:
A__ = self.num_labels
A__ = BioGptForTokenClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
def UpperCAmelCase_ ( self :Optional[Any] )-> Tuple:
A__ = BioGptModelTester(self )
A__ = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def UpperCAmelCase_ ( self :Dict )-> Any:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self :int )-> Union[str, Any]:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCAmelCase_ ( self :Dict )-> Optional[int]:
A__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ = type
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def UpperCAmelCase_ ( self :Tuple )-> int:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCamelCase_ )
def UpperCAmelCase_ ( self :Dict )-> int:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCamelCase_ , gradient_checkpointing=lowerCamelCase_ )
def UpperCAmelCase_ ( self :Optional[int] )-> str:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCamelCase_ )
def UpperCAmelCase_ ( self :Optional[int] )-> str:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCamelCase_ )
def UpperCAmelCase_ ( self :List[Any] )-> Any:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCamelCase_ )
@slow
def UpperCAmelCase_ ( self :Optional[int] )-> Union[str, Any]:
A__ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowerCamelCase_ )
A__ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
A__ = "left"
# Define PAD Token = EOS Token = 50256
A__ = tokenizer.eos_token
A__ = model.config.eos_token_id
# use different length sentences to test batching
A__ = [
"Hello, my dog is a little",
"Today, I",
]
A__ = tokenizer(lowerCamelCase_ , return_tensors="pt" , padding=lowerCamelCase_ )
A__ = inputs["input_ids"].to(lowerCamelCase_ )
A__ = model.generate(
input_ids=lowerCamelCase_ , attention_mask=inputs["attention_mask"].to(lowerCamelCase_ ) , )
A__ = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(lowerCamelCase_ )
A__ = model.generate(input_ids=lowerCamelCase_ )
A__ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
A__ = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(lowerCamelCase_ )
A__ = model.generate(input_ids=lowerCamelCase_ , max_length=model.config.max_length - num_paddings )
A__ = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ )
A__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase_ )
A__ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase_ )
A__ = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , [non_padded_sentence, padded_sentence] )
@slow
def UpperCAmelCase_ ( self :str )-> Tuple:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = BioGptModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def UpperCAmelCase_ ( self :Union[str, Any] )-> Optional[int]:
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = input_dict["input_ids"]
A__ = input_ids.ne(1 ).to(lowerCamelCase_ )
A__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ = BioGptForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase_ ( self :int )-> Union[str, Any]:
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = 3
A__ = "multi_label_classification"
A__ = input_dict["input_ids"]
A__ = input_ids.ne(1 ).to(lowerCamelCase_ )
A__ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ = BioGptForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
A__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase_ ( self :Any )-> Optional[Any]:
A__ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
A__ = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
A__ = model(lowerCamelCase_ )[0]
A__ = 4_23_84
A__ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCamelCase_ )
A__ = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self :Optional[Any] )-> Any:
A__ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
A__ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowerCamelCase_ )
torch.manual_seed(0 )
A__ = tokenizer("COVID-19 is" , return_tensors="pt" ).to(lowerCamelCase_ )
A__ = model.generate(
**lowerCamelCase_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCamelCase_ , )
A__ = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase_ )
A__ = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
| 440 |
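# Generation sketch mirroring the slow BioGPT tests above (the model id, prompt,
# and beam settings come from the tests; max_length is shortened here purely for
# illustration):
from transformers import BioGptForCausalLM, BioGptTokenizer

biogpt_tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
biogpt_model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

encoded = biogpt_tokenizer("COVID-19 is", return_tensors="pt")
generated_ids = biogpt_model.generate(**encoded, min_length=20, max_length=64, num_beams=5, early_stopping=True)
print(biogpt_tokenizer.decode(generated_ids[0], skip_special_tokens=True))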
'''simple docstring'''
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    # the digit n must not already appear in the row, the column,
    # or the 3x3 box containing (row, column)
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            # backtrack: undo the tentative placement
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 90 | 0 |
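# Note on the backtracking step above: `sudoku` mutates the grid in place
# (it writes a candidate digit, recurses, and resets the cell to 0 on failure),
# so pass a copy when the original grid must be preserved:
import copy

solved = sudoku(copy.deepcopy(initial_grid))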
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
def __init__(self : List[Any], __UpperCAmelCase : Any, __UpperCAmelCase : Optional[Any]=13, __UpperCAmelCase : Optional[Any]=10, __UpperCAmelCase : int=3, __UpperCAmelCase : int=2, __UpperCAmelCase : Optional[Any]=2, __UpperCAmelCase : Dict=2, __UpperCAmelCase : str=True, __UpperCAmelCase : List[str]=True, __UpperCAmelCase : List[Any]=32, __UpperCAmelCase : Tuple=5, __UpperCAmelCase : Optional[int]=4, __UpperCAmelCase : List[str]=37, __UpperCAmelCase : Any="gelu", __UpperCAmelCase : Optional[int]=0.1, __UpperCAmelCase : Any=0.1, __UpperCAmelCase : str=10, __UpperCAmelCase : Any=0.02, __UpperCAmelCase : Optional[Any]=0.9, __UpperCAmelCase : List[str]=None, ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = parent
SCREAMING_SNAKE_CASE : List[Any] = batch_size
SCREAMING_SNAKE_CASE : Optional[Any] = image_size
SCREAMING_SNAKE_CASE : str = num_channels
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : Any = tubelet_size
SCREAMING_SNAKE_CASE : str = num_frames
SCREAMING_SNAKE_CASE : Optional[Any] = is_training
SCREAMING_SNAKE_CASE : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : Any = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Dict = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Any = mask_ratio
SCREAMING_SNAKE_CASE : Dict = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
SCREAMING_SNAKE_CASE : Tuple = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : str = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
SCREAMING_SNAKE_CASE : Tuple = int(mask_ratio * self.seq_length )
def lowercase__ (self : Optional[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : str = self.get_config()
return config, pixel_values, labels
def lowercase__ (self : Any ) -> Optional[Any]:
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
def lowercase__ (self : int, __UpperCAmelCase : Optional[Any], __UpperCAmelCase : Dict, __UpperCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = VideoMAEModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ (self : List[str], __UpperCAmelCase : str, __UpperCAmelCase : List[Any], __UpperCAmelCase : Any ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = VideoMAEForPreTraining(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.ones((self.num_masks,) )
SCREAMING_SNAKE_CASE : List[str] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
SCREAMING_SNAKE_CASE : List[Any] = mask.expand(self.batch_size, -1 ).bool()
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_, lowerCamelCase_ )
# model only returns predictions for masked patches
SCREAMING_SNAKE_CASE : Dict = mask.sum().item()
SCREAMING_SNAKE_CASE : Dict = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels) )
def lowercase__ (self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def lowercase__ (self : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = VideoMAEModelTester(self )
SCREAMING_SNAKE_CASE : int = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=37 )
def lowercase__ (self : List[str], __UpperCAmelCase : Optional[Any], __UpperCAmelCase : List[str], __UpperCAmelCase : Any=False ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(lowerCamelCase_ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE : Tuple = torch.ones((self.model_tester.num_masks,) )
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
SCREAMING_SNAKE_CASE : List[Any] = mask.expand(self.model_tester.batch_size, -1 ).bool()
SCREAMING_SNAKE_CASE : str = bool_masked_pos.to(lowerCamelCase_ )
if return_labels:
if model_class in [
*get_values(lowerCamelCase_ ),
]:
SCREAMING_SNAKE_CASE : Any = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase_ )
return inputs_dict
def lowercase__ (self : Optional[int] ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def lowercase__ (self : str ) -> str:
"""simple docstring"""
pass
def lowercase__ (self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
SCREAMING_SNAKE_CASE : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, nn.Linear ) )
def lowercase__ (self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def lowercase__ (self : Dict ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowercase__ (self : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
@slow
def lowercase__ (self : List[str] ) -> Any:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[str] = VideoMAEModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowercase__ (self : Dict ) -> Tuple:
"""simple docstring"""
if not self.has_attentions:
pass
else:
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[str] = True
for model_class in self.all_model_classes:
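                # VideoMAE's encoder only processes the visible (non-masked) patches during pre-training,
                # so attention maps for VideoMAEForPreTraining cover num_visible_patches tokens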
SCREAMING_SNAKE_CASE : Any = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : List[Any] = False
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[Any] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : int = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase_ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : str = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
self.assertEqual(out_len + 1, len(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : List[Any] = outputs.attentions
self.assertEqual(len(lowerCamelCase_ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def lowercase__ (self : Dict ) -> Dict:
"""simple docstring"""
def check_hidden_states_output(__UpperCAmelCase : Tuple, __UpperCAmelCase : List[str], __UpperCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(**self._prepare_for_class(lowerCamelCase_, lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Any = outputs.hidden_states
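            # expect one hidden state per transformer layer plus the initial embedding output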
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCamelCase_ ), lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE : Tuple = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ (self : int ) -> int:
"""simple docstring"""
pass
def __lowercase ():
SCREAMING_SNAKE_CASE : List[str] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
SCREAMING_SNAKE_CASE : Any = np.load(_SCREAMING_SNAKE_CASE )
return list(_SCREAMING_SNAKE_CASE )
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def lowercase__ (self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ (self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : str = prepare_video()
SCREAMING_SNAKE_CASE : Optional[int] = image_processor(lowerCamelCase_, return_tensors='''pt''' ).to(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : int = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = torch.tensor([0.3669, -0.0688, -0.2421] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
@slow
def lowercase__ (self : Dict ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE : Tuple = prepare_video()
SCREAMING_SNAKE_CASE : int = image_processor(lowerCamelCase_, return_tensors='''pt''' ).to(lowerCamelCase_ )
# add boolean mask, indicating which patches to mask
SCREAMING_SNAKE_CASE : int = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''', filename='''bool_masked_pos.pt''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.load(lowerCamelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**lowerCamelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 1408, 1536] )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=lowerCamelCase_ )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCamelCase_, atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.5142], device=lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.loss, lowerCamelCase_, atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
SCREAMING_SNAKE_CASE : List[str] = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''', norm_pix_loss=lowerCamelCase_ ).to(
lowerCamelCase_ )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase_ )
        SCREAMING_SNAKE_CASE : Any = torch.tensor([0.6469], device=lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.loss, lowerCamelCase_, atol=1e-4 ) )
| 507 |
'''simple docstring'''
def fibonacci ( n ) -> int:
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
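# linear probe over Fibonacci indices; each step recomputes fibonacci(index) from scratch,
# so the overall cost is quadratic in the returned index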
def fibonacci_digits_index ( n ) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution ( n = 1000 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 90 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main ( ) -> None:
    """simple docstring"""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
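    # parse a second time so deprecated `--no_*` flags raise the ValueError that is rewritten below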
try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__SCREAMING_SNAKE_CASE = "Arg --no_{0} is no longer used, please use --no-{0} instead."
__SCREAMING_SNAKE_CASE = " ".join(str(__snake_case ).split(" " )[:-1] )
__SCREAMING_SNAKE_CASE = ""
__SCREAMING_SNAKE_CASE = eval(str(__snake_case ).split(" " )[-1] )
__SCREAMING_SNAKE_CASE = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
| 693 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot ( lst ) -> int:
    return choice(lst )
def kth_number ( lst , k ) -> int:
    pivot = random_pivot(lst )
# partition based on pivot
# linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
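    # elements equal to the pivot (other than the pivot itself) are dropped here,
    # so the list is assumed to contain distinct values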
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
    if len(small ) == k - 1:
return pivot
# pivot is in elements bigger than k
    elif len(small ) < k - 1:
        return kth_number(big , k - len(small ) - 1 )
# pivot is in elements smaller than k
else:
        return kth_number(small , k )
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 0 |
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase ( a__ ):
def __init__( self : Tuple , __lowercase : Any , __lowercase : Union[str, Any] = None , __lowercase : Dict = None , __lowercase : int = None , __lowercase : Tuple = False , __lowercase : Union[str, Any] = False , __lowercase : Tuple = None , **__lowercase : Optional[Any] , ):
"""simple docstring"""
super().__init__(
lowerCamelCase_ , split=lowerCamelCase_ , features=lowerCamelCase_ , cache_dir=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ , streaming=lowerCamelCase_ , num_proc=lowerCamelCase_ , **lowerCamelCase_ , )
__lowercase =path_or_paths if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else {self.split: path_or_paths}
__lowercase =Text(
cache_dir=lowerCamelCase_ , data_files=lowerCamelCase_ , features=lowerCamelCase_ , **lowerCamelCase_ , )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
if self.streaming:
__lowercase =self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__lowercase =None
__lowercase =None
__lowercase =None
__lowercase =None
self.builder.download_and_prepare(
download_config=lowerCamelCase_ , download_mode=lowerCamelCase_ , verification_mode=lowerCamelCase_ , base_path=lowerCamelCase_ , num_proc=self.num_proc , )
__lowercase =self.builder.as_dataset(
split=self.split , verification_mode=lowerCamelCase_ , in_memory=self.keep_in_memory )
return dataset
| 119 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
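# torch-backed modeling classes are only registered in the import structure when torch is importable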
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 90 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
parser.add_argument(
'''--validation_file''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''A csv or a json file containing the validation data.''' )
parser.add_argument(
'''--max_length''' , type=UpperCamelCase__ , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
parser.add_argument(
'''--num_beams''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help=(
'''Number of beams to use for evaluation. This argument will be '''
'''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
) , )
parser.add_argument(
'''--model_name_or_path''' , type=UpperCamelCase__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCamelCase__ , )
parser.add_argument(
'''--config_name''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Pretrained config name or path if not the same as model_name''' , )
parser.add_argument(
'''--device''' , type=UpperCamelCase__ , default='''cpu''' , help='''Device where the model will be run''' , )
parser.add_argument('''--output_file_path''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Where to store the final ONNX file.''' )
__UpperCAmelCase = parser.parse_args()
return args
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any]="cpu" ):
"""simple docstring"""
__UpperCAmelCase = model_dict[model_name].from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
__UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(UpperCamelCase__ )
if model_name in ["facebook/bart-base"]:
__UpperCAmelCase = 0
__UpperCAmelCase = None
__UpperCAmelCase = 0
return huggingface_model, tokenizer
def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Dict ):
"""simple docstring"""
model.eval()
__UpperCAmelCase = None
__UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(UpperCamelCase__ ) )
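    # TorchScript-compile the beam-search wrapper so the full generate() loop can be exported to ONNX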
with torch.no_grad():
__UpperCAmelCase = '''My friends are cool but they eat too many carbs.'''
__UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors='''pt''' ).to(model.device )
__UpperCAmelCase = model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=UpperCamelCase__ , max_length=UpperCamelCase__ , early_stopping=UpperCamelCase__ , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
UpperCamelCase__ , (
inputs['''input_ids'''],
inputs['''attention_mask'''],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , UpperCamelCase__ , opset_version=1_4 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={
'''input_ids''': {0: '''batch''', 1: '''seq'''},
'''output_ids''': {0: '''batch''', 1: '''seq_out'''},
} , example_outputs=UpperCamelCase__ , )
logger.info('''Model exported to {}'''.format(UpperCamelCase__ ) )
__UpperCAmelCase = remove_dup_initializers(os.path.abspath(UpperCamelCase__ ) )
logger.info('''Deduplicated and optimized model written to {}'''.format(UpperCamelCase__ ) )
__UpperCAmelCase = onnxruntime.InferenceSession(UpperCamelCase__ )
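    # replay the same inputs through ONNX Runtime and compare against the PyTorch beam-search output below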
__UpperCAmelCase = ort_sess.run(
UpperCamelCase__ , {
'''input_ids''': inputs['''input_ids'''].cpu().numpy(),
'''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
'''num_beams''': np.array(UpperCamelCase__ ),
'''max_length''': np.array(UpperCamelCase__ ),
'''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('''Model outputs from torch and ONNX Runtime are similar.''' )
logger.info('''Success.''' )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = parse_args()
__UpperCAmelCase = 5
__UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
__UpperCAmelCase = torch.device(args.device )
__UpperCAmelCase , __UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , UpperCamelCase__ )
if model.config.decoder_start_token_id is None:
raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
model.to(UpperCamelCase__ )
if args.max_length:
__UpperCAmelCase = args.max_length
if args.num_beams:
__UpperCAmelCase = args.num_beams
if args.output_file_path:
__UpperCAmelCase = args.output_file_path
else:
__UpperCAmelCase = '''BART.onnx'''
logger.info('''Exporting model to ONNX''' )
export_and_validate_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 262 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__UpperCAmelCase = TypeVar('''KEY''')
__UpperCAmelCase = TypeVar('''VAL''')
@dataclass(frozen=a__ , slots=a__ )
class a__ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowercase__ : KEY
lowercase__ : VAL
class a__ ( _Item ):
'''simple docstring'''
def __init__( self ) -> None:
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
def __bool__( self ) -> bool:
return False
__UpperCAmelCase = _DeletedItem()
class a__ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ = 8 , lowerCamelCase_ = 0.75 ) -> None:
lowerCAmelCase__ = initial_block_size
lowerCAmelCase__ = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCAmelCase__ = capacity_factor
lowerCAmelCase__ = 0
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
return hash(lowerCamelCase_ ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
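        # open addressing with linear probing: advance one bucket at a time, wrapping around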
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
lowerCAmelCase__ = self._buckets[ind]
if not stored:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCAmelCase__ = _Item(lowerCamelCase_ , lowerCamelCase_ )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCAmelCase__ = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = self._buckets
lowerCAmelCase__ = [None] * new_size
lowerCAmelCase__ = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Iterator[int]:
lowerCAmelCase__ = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCAmelCase__ = self._get_next_ind(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
break
def __setitem__( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_ , lowerCamelCase_ )
def __delitem__( self , lowerCamelCase_ ) -> None:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCAmelCase__ = _deleted
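                # leave a tombstone so probe chains that pass through this slot stay intact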
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self , lowerCamelCase_ ) -> VAL:
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCAmelCase__ = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__( self ) -> int:
return self._len
def __iter__( self ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self ) -> str:
        lowerCAmelCase__ = ''', '''.join(
            F"""{item.key}: {item.val}""" for item in self._buckets if item )
return F"""HashMap({val_string})""" | 90 | 0 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class UpperCamelCase_ ( a__ ):
def __init__( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=0 ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 1.0 if scale is None else scale
UpperCAmelCase = 0.0 if loc is None else loc
super().__init__(lowerCamelCase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowerCamelCase_ )] )
@property
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def UpperCamelCase_ ( self ) -> str:
"""simple docstring"""
return self.variance.sqrt()
class UpperCamelCase_ ( nn.Module ):
def __init__( self , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
UpperCAmelCase = args_dim
UpperCAmelCase = nn.ModuleList([nn.Linear(lowerCamelCase_ , lowerCamelCase_ ) for dim in args_dim.values()] )
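        # one linear head per distribution argument, each projecting in_features -> args_dim[arg]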
UpperCAmelCase = domain_map
def UpperCamelCase_ ( self , snake_case__ ) -> Tuple[torch.Tensor]:
"""simple docstring"""
UpperCAmelCase = [proj(lowerCamelCase_ ) for proj in self.proj]
return self.domain_map(*lowerCamelCase_ )
class UpperCamelCase_ ( nn.Module ):
def __init__( self , snake_case__ ) -> Any:
"""simple docstring"""
super().__init__()
UpperCAmelCase = function
def UpperCamelCase_ ( self , snake_case__ , *snake_case__ ) -> int:
"""simple docstring"""
return self.function(lowerCamelCase_ , *lowerCamelCase_ )
class UpperCamelCase_ :
_A : type
_A : int
_A : Dict[str, int]
def __init__( self , snake_case__ = 1 ) -> None:
"""simple docstring"""
UpperCAmelCase = dim
UpperCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCamelCase_ ( self , snake_case__ ) -> Union[str, Any]:
"""simple docstring"""
if self.dim == 1:
return self.distribution_class(*lowerCamelCase_ )
else:
return Independent(self.distribution_class(*lowerCamelCase_ ) , 1 )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = None , ) -> Distribution:
"""simple docstring"""
UpperCAmelCase = self._base_distribution(lowerCamelCase_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowerCamelCase_ , loc=lowerCamelCase_ , scale=lowerCamelCase_ , event_dim=self.event_dim )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return () if self.dim == 1 else (self.dim,)
@property
def UpperCamelCase_ ( self ) -> int:
"""simple docstring"""
return len(self.event_shape )
@property
def UpperCamelCase_ ( self ) -> float:
"""simple docstring"""
return 0.0
def UpperCamelCase_ ( self , snake_case__ ) -> nn.Module:
"""simple docstring"""
return ParameterProjection(
in_features=lowerCamelCase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def UpperCamelCase_ ( self , *snake_case__ ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError()
@staticmethod
def UpperCamelCase_ ( snake_case__ ) -> torch.Tensor:
"""simple docstring"""
return (x + torch.sqrt(torch.square(lowerCamelCase_ ) + 4.0 )) / 2.0
class UpperCamelCase_ ( a__ ):
_A : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
_A : type = StudentT
@classmethod
def UpperCamelCase_ ( cls , snake_case__ , snake_case__ , snake_case__ ) -> int:
"""simple docstring"""
UpperCAmelCase = cls.squareplus(lowerCamelCase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
UpperCAmelCase = 2.0 + cls.squareplus(lowerCamelCase_ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class UpperCamelCase_ ( a__ ):
_A : Dict[str, int] = {"loc": 1, "scale": 1}
_A : type = Normal
@classmethod
def UpperCamelCase_ ( cls , snake_case__ , snake_case__ ) -> Dict:
"""simple docstring"""
UpperCAmelCase = cls.squareplus(lowerCamelCase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class UpperCamelCase_ ( a__ ):
_A : Dict[str, int] = {"total_count": 1, "logits": 1}
_A : type = NegativeBinomial
@classmethod
def UpperCamelCase_ ( cls , snake_case__ , snake_case__ ) -> int:
"""simple docstring"""
UpperCAmelCase = cls.squareplus(lowerCamelCase_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def UpperCamelCase_ ( self , snake_case__ ) -> Distribution:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_ )
else:
return Independent(self.distribution_class(total_count=lowerCamelCase_ , logits=lowerCamelCase_ ) , 1 )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ = None , snake_case__ = None ) -> Distribution:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
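            # adding log(scale) to the logits multiplies the distribution's mean by `scale`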
return self._base_distribution((total_count, logits) )
| 673 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def _snake_case ( A , A , A ) -> Union[str, Any]:
lowerCAmelCase__ = OmegaConf.load(A )
lowerCAmelCase__ = torch.load(A , map_location='''cpu''' )['''model''']
lowerCAmelCase__ = list(state_dict.keys() )
# extract state_dict for VQVAE
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''first_stage_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
# extract state_dict for UNetLDM
lowerCAmelCase__ = {}
lowerCAmelCase__ = '''model.diffusion_model.'''
for key in keys:
if key.startswith(A ):
lowerCAmelCase__ = state_dict[key]
lowerCAmelCase__ = config.model.params.first_stage_config.params
lowerCAmelCase__ = config.model.params.unet_config.params
lowerCAmelCase__ = VQModel(**A ).eval()
vqvae.load_state_dict(A )
lowerCAmelCase__ = UNetLDMModel(**A ).eval()
unet.load_state_dict(A )
lowerCAmelCase__ = DDIMScheduler(
timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , )
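    # the scheduler is rebuilt from the LDM config's beta schedule rather than copied from the checkpoint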
lowerCAmelCase__ = LDMPipeline(A , A , A )
pipeline.save_pretrained(A )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', type=str, required=True)
parser.add_argument('''--config_path''', type=str, required=True)
parser.add_argument('''--output_path''', type=str, required=True)
__UpperCAmelCase = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 90 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ ( unittest.TestCase):
'''simple docstring'''
def __init__( self , a__ , a__=13 , a__=3 , a__=2_24 , a__=30 , a__=4_00 , a__=True , a__=None , a__=True , a__=[0.5, 0.5, 0.5] , a__=[0.5, 0.5, 0.5] , ) -> Any:
'''simple docstring'''
__snake_case :Any = size if size is not None else {"""height""": 18, """width""": 18}
__snake_case :List[Any] = parent
__snake_case :Optional[int] = batch_size
__snake_case :Tuple = num_channels
__snake_case :int = image_size
__snake_case :Optional[Any] = min_resolution
__snake_case :Dict = max_resolution
__snake_case :Any = do_resize
__snake_case :int = size
__snake_case :Any = do_normalize
__snake_case :Optional[Any] = image_mean
__snake_case :List[str] = image_std
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class snake_case__ ( a__ , unittest.TestCase):
'''simple docstring'''
lowerCamelCase : int = ViTImageProcessor if is_vision_available() else None
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :List[str] = EfficientFormerImageProcessorTester(self )
@property
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
__snake_case :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) )
def __lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def __lowercase ( self ) -> List[Any]:
'''simple docstring'''
__snake_case :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case :Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , Image.Image )
# Test not batched input
__snake_case :str = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
__snake_case :Dict = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def __lowercase ( self ) -> str:
'''simple docstring'''
__snake_case :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case :int = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , np.ndarray )
# Test not batched input
__snake_case :Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
__snake_case :int = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case :Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase_ , torch.Tensor )
# Test not batched input
__snake_case :Union[str, Any] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
# Test batched
__snake_case :Optional[Any] = image_processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) , )
| 455 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = super().to_dict()
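        # nested GenerationConfig values are flattened to plain dicts so the result stays JSON-serializable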
for k, v in d.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = v.to_dict()
return d | 90 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train , X_test , y_train , y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
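# fitting a linear model on degree-4 polynomial features is equivalent to a quartic regression in the original feature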
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial ( ) -> None:
    plt.scatter(X , y , color='red' )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color='blue' )
plt.title('Truth or Bluff (Linear Regression)' )
plt.xlabel('Position level' )
plt.ylabel('Salary' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 562 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCAmelCase = False
class a__ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
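        # round-trip the pipeline through save_pretrained/from_pretrained; the same seed must reproduce the images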
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = generator.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt='''first prompt''' , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''cyberpunk 2077'''
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.dual_guided(
prompt=lowerCamelCase_ , image=lowerCamelCase_ , text_to_image_strength=0.75 , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = '''A painting of a squirrel eating a burger '''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe.text_to_image(
prompt=lowerCamelCase_ , generator=lowerCamelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowerCAmelCase__ = pipe.image_variation(lowerCamelCase_ , generator=lowerCamelCase_ , output_type='''numpy''' ).images
lowerCAmelCase__ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 90 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __lowercase :
"""simple docstring"""
def __init__(self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_12 , lowercase__=16 , lowercase__=2 , lowercase__=0.02 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
snake_case_ : Optional[Any] = parent
snake_case_ : List[str] = 13
snake_case_ : Optional[Any] = 7
snake_case_ : Any = True
snake_case_ : Any = True
snake_case_ : Tuple = True
snake_case_ : Tuple = True
snake_case_ : int = 99
snake_case_ : str = 3_84
snake_case_ : Any = 2
snake_case_ : Union[str, Any] = 4
snake_case_ : Optional[Any] = 37
snake_case_ : Optional[int] = """gelu"""
snake_case_ : Tuple = 0.1
snake_case_ : Dict = 0.1
snake_case_ : Dict = 5_12
snake_case_ : str = 16
snake_case_ : str = 2
snake_case_ : Tuple = 0.02
snake_case_ : Optional[int] = 3
snake_case_ : Optional[int] = 4
snake_case_ : Dict = 1_28
snake_case_ : str = 2
snake_case_ : List[str] = 9
snake_case_ : Tuple = 1
snake_case_ : Dict = None
def __UpperCamelCase (self ):
snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ : Tuple = None
if self.use_input_mask:
snake_case_ : int = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ : int = None
if self.use_token_type_ids:
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ : Any = None
snake_case_ : Union[str, Any] = None
snake_case_ : Optional[Any] = None
if self.use_labels:
snake_case_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ : Dict = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Any = TFConvBertModel(config=lowerCamelCase_ )
snake_case_ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case_ : int = [input_ids, input_mask]
snake_case_ : Dict = model(lowerCamelCase_ )
snake_case_ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Tuple = TFConvBertForMaskedLM(config=lowerCamelCase_ )
snake_case_ : List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case_ : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Union[str, Any] = self.num_labels
snake_case_ : Tuple = TFConvBertForSequenceClassification(config=lowerCamelCase_ )
snake_case_ : int = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Tuple = self.num_choices
snake_case_ : List[Any] = TFConvBertForMultipleChoice(config=lowerCamelCase_ )
snake_case_ : Tuple = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ : Optional[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
snake_case_ : str = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
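        # replicate each example num_choices times along a new axis: (batch, num_choices, seq_len)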
snake_case_ : Optional[Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case_ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : List[Any] = self.num_labels
snake_case_ : str = TFConvBertForTokenClassification(config=lowerCamelCase_ )
snake_case_ : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case_ : List[str] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
snake_case_ : Dict = TFConvBertForQuestionAnswering(config=lowerCamelCase_ )
snake_case_ : List[str] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case_ : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase (self ):
snake_case_ : List[str] = self.prepare_config_and_inputs()
        (
            snake_case_,
            snake_case_,
            snake_case_,
            snake_case_,
            snake_case_,
            snake_case_,
            snake_case_,
        ) = config_and_inputs
snake_case_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __lowercase ( a__ , a__ , unittest.TestCase):
"""simple docstring"""
_A : Tuple = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_A : int = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_A : Any = False
_A : Dict = False
_A : int = False
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = TFConvBertModelTester(self )
snake_case_ : List[Any] = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __UpperCamelCase (self ):
self.config_tester.run_common_tests()
def __UpperCamelCase (self ):
snake_case_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def __UpperCamelCase (self ):
snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ )
def __UpperCamelCase (self ):
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ )
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ )
def __UpperCamelCase (self ):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ )
def __UpperCamelCase (self ):
snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ )
@slow
def __UpperCamelCase (self ):
        snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = True
snake_case_ : Optional[Any] = True
if hasattr(lowerCamelCase_ , """use_cache""" ):
snake_case_ : Optional[int] = True
snake_case_ : Any = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
snake_case_ : Any = getattr(self.model_tester , """key_length""" , lowerCamelCase_ )
for model_class in self.all_model_classes:
snake_case_ : List[str] = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
snake_case_ : Dict = model_class(lowerCamelCase_ )
snake_case_ : Dict = len(model(lowerCamelCase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ )
snake_case_ : Optional[int] = os.path.join(lowerCamelCase_ , """saved_model""" , """1""" )
snake_case_ : List[Any] = tf.keras.models.load_model(lowerCamelCase_ )
snake_case_ : int = model(lowerCamelCase_ )
if self.is_encoder_decoder:
snake_case_ : Optional[Any] = outputs["""encoder_hidden_states"""]
snake_case_ : List[str] = outputs["""encoder_attentions"""]
else:
snake_case_ : Dict = outputs["""hidden_states"""]
snake_case_ : Union[str, Any] = outputs["""attentions"""]
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
snake_case_ : Any = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __UpperCamelCase (self ):
snake_case_ : Any = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(lowerCamelCase_ )
def __UpperCamelCase (self ):
        snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[Any] = True
snake_case_ : Tuple = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
snake_case_ : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
snake_case_ : Optional[Any] = getattr(self.model_tester , """key_length""" , lowerCamelCase_ )
snake_case_ : Optional[Any] = getattr(self.model_tester , """key_length""" , lowerCamelCase_ )
def check_decoder_attentions_output(lowercase__ ):
snake_case_ : Optional[int] = len(lowerCamelCase_ )
self.assertEqual(out_len % 2 , 0 )
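            # ConvBERT assigns half the attention heads to span-based dynamic convolution (head_ratio=2),
            # hence num_attention_heads / 2 in the expected attention-map shape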
snake_case_ : Union[str, Any] = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowercase__ ):
snake_case_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case_ : Tuple = True
snake_case_ : Optional[int] = False
snake_case_ : Any = model_class(lowerCamelCase_ )
snake_case_ : Optional[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
snake_case_ : Any = len(lowerCamelCase_ )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
if self.is_encoder_decoder:
snake_case_ : List[Any] = model_class(lowerCamelCase_ )
snake_case_ : Optional[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_decoder_attentions_output(lowerCamelCase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case_ : Optional[Any] = True
snake_case_ : Tuple = model_class(lowerCamelCase_ )
snake_case_ : Optional[int] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
# Check attention is always last and order is fine
snake_case_ : List[str] = True
snake_case_ : Union[str, Any] = True
snake_case_ : Optional[Any] = model_class(lowerCamelCase_ )
snake_case_ : Optional[int] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_ ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase_ )
check_encoder_attentions_output(lowerCamelCase_ )
@require_tf
class __lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def __UpperCamelCase (self ):
snake_case_ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
snake_case_ : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case_ : Union[str, Any] = model(lowerCamelCase_ )[0]
snake_case_ : List[str] = [1, 6, 7_68]
self.assertEqual(output.shape , lowerCamelCase_ )
snake_case_ : List[str] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1e-4 )
| 480 |
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital ( n ) -> bool:
    base_str = str(n )
    return len(base_str ) == 9 and set(base_str ) == set('''123456789''' )
def solution ( ) -> int | None:
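    # concat(x, 2x) for a 4-digit x is x * 10**5 + 2x = 100002 * x (2x then has 5 digits);
    # concat(x, 2x, 3x) for a 3-digit x is x * 10**6 + 2x * 10**3 + 3x = 1002003 * x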
for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
return None
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 0 |
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : str = 1 , UpperCamelCase : Optional[Any] = "new" , UpperCamelCase : Tuple = None ):
"""simple docstring"""
A__ : Optional[int] =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(UpperCamelCase ) - valid_terms ) ):
A__ : Any =F'''Invalid search term: {invalid_search_terms}'''
raise ValueError(UpperCamelCase )
A__ : Union[str, Any] =requests.get(
F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={"User-agent": "A random string"} , )
if response.status_code == 429:
raise requests.HTTPError
A__ : Dict =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(UpperCamelCase )}
A__ : Optional[int] ={}
for id_ in range(UpperCamelCase ):
A__ : Any ={
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
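
# Shape of the result (illustrative placeholders, not real API output): with
# wanted_data=["title", "url"] and limit=2 the function returns
#   {0: {"title": "...", "url": "..."}, 1: {"title": "...", "url": "..."}}
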
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 656 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru | 90 | 0 |
import numpy as np
class IndexCalculation:
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvi2,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arvi2(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
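

# Example usage (a minimal sketch with made-up 2x2 band values; not part of
# the original module):
if __name__ == "__main__":
    example_nir = np.array([[0.8, 0.7], [0.9, 0.6]])
    example_red = np.array([[0.2, 0.3], [0.1, 0.4]])
    indices = IndexCalculation(red=example_red, nir=example_nir)
    # elementwise (nir - red) / (nir + red)
    print(indices.calculation("NDVI"))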
| 68 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
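

# Behavior exercised by the tests below (a sketch, not part of the test
# module): `find_executable_batch_size` re-invokes the decorated function,
# halving its first `batch_size` argument whenever an out-of-memory error
# escapes, e.g. 128 -> 64 -> 32 -> ... until the call succeeds or the size
# reaches zero.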
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory) | 90 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
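
    # Standalone reproduction of the slow test above (a sketch assuming
    # network access; not part of the test suite):
    #
    #   model = FlaxBertModel.from_pretrained("bert-base-cased")
    #   out = model(np.ones((1, 1)))
    #   print(out.last_hidden_state.shape)  # (1, 1, hidden_size)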
| 440 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            " We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
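

# Usage sketch (illustrative only; `MyModel` and the checkpoint path are
# placeholders, not part of this module):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel()
#   quantized = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/checkpoint", device_map="auto"
#   )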
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size())) | 90 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
snake_case_ = """</w>"""
snake_case_ = """@@ """
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
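
# For example (illustrative): get_pairs(("l", "o", "w</w>")) returns
# {("l", "o"), ("o", "w</w>")}.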
# Speech2Text2 has no max input length
snake_case_ = {"""facebook/s2t-wav2vec2-large-en-de""": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_SPLIT)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide a `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_SPLIT))

        return string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 507 |
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor step (explicit Euler), followed by a trapezoidal corrector
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
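
# Worked example (a sketch, not in the original file): integrating y' = y with
# y(0) = 1 over [0, 1] using step_size = 0.01 gives y[-1] close to e = 2.718...,
# since the modified Euler (Heun) scheme has global error O(step_size**2):
#
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)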
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
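
# Note (added for clarity, not part of the original file): _LazyModule defers
# the heavy torch/TF imports declared in `_import_structure` until an
# attribute such as `CTRLModel` is first accessed on this module.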
| 693 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_=2 , lowerCamelCase_=3 , lowerCamelCase_=64 , lowerCamelCase_=None ) -> Dict:
lowerCAmelCase__ = np.random.default_rng(lowerCamelCase_ )
lowerCAmelCase__ = length
lowerCAmelCase__ = rng.normal(size=(length,) ).astype(np.floataa )
lowerCAmelCase__ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Any:
return self.length
def __getitem__( self , lowerCamelCase_ ) -> List[str]:
return {"x": self.x[i], "y": self.y[i]}
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> List[Any]:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Optional[Any]:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a[0] + self.b[0]
class a__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=False ) -> Any:
super().__init__()
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = torch.nn.Parameter(torch.tensor(lowerCamelCase_ ).float() )
lowerCAmelCase__ = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_=None ) -> Any:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
lowerCAmelCase__ = False
return x * self.a + self.b
def get_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader | 90 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
},
'''tokenizer_file''': {
'''google/bigbird-roberta-base''': (
'''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'''
),
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
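
    # For reference (illustrative, not part of the original file): for a pair
    # of sequences the layout produced by `build_inputs_with_special_tokens` is
    #   [CLS] token_ids_0 [SEP] token_ids_1 [SEP]
    # and `create_token_type_ids_from_sequences` marks the first segment
    # (including [CLS] and its [SEP]) with 0 and the second with 1.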
| 119 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint) | 90 | 0 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True if and only if `phone` is a valid Indian phone number."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator("+918827897895"))
| 262 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
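
# How the fused matrix is split (an illustration, assuming ViLT-base with
# hidden_size = 768): the timm-style checkpoint stores one (3 * 768, 768) qkv
# weight; rows 0:768 become the query projection, rows 768:1536 the key
# projection, and the final 768 rows the value projection.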
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 90 | 0 |
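# Illustrative sketch (not part of the original dataset row): how read_in_q_k_v above
# splits a fused timm-style qkv projection into separate query/key/value tensors.
# The hidden size here is made up for demonstration.
import torch

hidden_size = 4
qkv_weight = torch.randn(3 * hidden_size, hidden_size)  # fused [q; k; v] rows
query = qkv_weight[:hidden_size, :]
key = qkv_weight[hidden_size : hidden_size * 2, :]
value = qkv_weight[-hidden_size:, :]
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)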
"""simple docstring"""
from __future__ import annotations
def all_unique(input_list) -> bool:
    """Check whether all elements of the given sequence are distinct."""
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 |
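# Quick illustrative checks for the uniqueness helper above (the function name
# follows the rewritten snippet; it is not part of the original row).
assert all_unique([1, 2, 3])
assert not all_unique("abca")  # 'a' repeats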
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''')
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895''')) | 90 | 0 |
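# Sketch of an equivalent check with re.fullmatch, which avoids comparing
# match.string back to the input (illustrative alternative, same pattern).
import re

def indian_phone_validator_fullmatch(phone: str) -> bool:
    return re.fullmatch(r"(\+91[\-\s]?)?[0]?(91)?[789]\d{9}", phone) is not None

assert indian_phone_validator_fullmatch("+918827897895")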
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""")[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""")[-1]
    items = name.split(""".""")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the original model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, """vocab.json""")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, """w""", encoding="""utf-8""") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=False,)
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,)
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1]), """w2v_path""": checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 455 |
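# Toy sketch of the MAPPING-driven renaming idea used above: fairseq-style keys
# are rewritten to HF-style keys by substring match (illustrative only, no
# relation to real checkpoint contents).
toy_mapping = {"post_extract_proj": "feature_projection.projection"}
toy_state = {"post_extract_proj.weight": 1}
renamed = {}
for name, value in toy_state.items():
    for key, mapped_key in toy_mapping.items():
        if key in name:
            renamed[name.replace(key, mapped_key)] = value
print(renamed)  # {'feature_projection.projection.weight': 1}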
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/esm2_t6_8M_UR50D''': '''https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt''',
'''facebook/esm2_t12_35M_UR50D''': '''https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/esm2_t6_8M_UR50D''': 1_024,
'''facebook/esm2_t12_35M_UR50D''': 1_024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, '''r''') as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """
    Constructs an ESM tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs,
    ):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''')
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''')

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''')
        with open(vocab_file, '''w''') as f:
            f.write('''\n'''.join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=True) | 90 | 0 |
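# Minimal usage sketch for the tokenizer above, assuming `transformers` is
# installed and EsmTokenizer (as named in the rewritten snippet) is importable.
# The tiny vocab is made up; real ESM vocabularies ship with the checkpoints.
import os, tempfile

with tempfile.TemporaryDirectory() as tmp:
    vocab_path = os.path.join(tmp, "vocab.txt")
    with open(vocab_path, "w") as f:
        f.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]))
    tokenizer = EsmTokenizer(vocab_path)
    print(tokenizer("L A G")["input_ids"])  # expected to be [0, 4, 5, 6, 2]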
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ('foo.json',)] )
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 5_0)
        self.assertEqual(loaded_config.max_length, 2_0)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained('gpt2')
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            'max_new_tokens': 1_0_2_4,
            'foo': 'bar',
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1_0_2_4)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {'foo': 'bar'})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = 'bar'
        with tempfile.TemporaryDirectory('test-generation-config') as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, 'bar')

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, 'foo')  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
'''simple docstring'''
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub('test-generation-config', use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-generation-config')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='test-generation-config', push_to_hub=True, use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub('valid_org/test-generation-config-org', use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-generation-config-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-generation-config-org', push_to_hub=True, use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 562 |
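# Standalone sketch of the save/load round trip exercised above (assumes
# `transformers` is installed; the values are illustrative).
import tempfile
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
assert loaded.temperature == 0.7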
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=False, set_alpha_to_one=False, )
torch.manual_seed(0 )
        vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''')
        tokenizer.model_max_length = 77

        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_alt_diffusion_ddim(self):
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = '''A photo of an astronaut'''
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_pndm(self):
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.get_dummy_components()
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCAmelCase__ = RobertaSeriesModelWithTransformation(lowerCamelCase_ )
lowerCAmelCase__ = text_encoder
lowerCAmelCase__ = AltDiffusionPipeline(**lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = self.get_dummy_inputs(lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe(**lowerCamelCase_ )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_alt_diffusion(self):
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_fast_ddim(self):
lowerCAmelCase__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
lowerCAmelCase__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ )
lowerCAmelCase__ = alt_pipe.to(lowerCamelCase_ )
alt_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe([prompt] , generator=lowerCamelCase_ , num_inference_steps=2 , output_type='''numpy''' )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
lowerCAmelCase__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 90 | 0 |
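# Sketch of the seeding pattern the tests above rely on: a torch.Generator
# pinned to a device gives reproducible random draws across runs (illustrative).
import torch

generator = torch.Generator(device="cpu").manual_seed(0)
first = torch.randn(2, 2, generator=generator)
generator = torch.Generator(device="cpu").manual_seed(0)
second = torch.randn(2, 2, generator=generator)
assert torch.equal(first, second)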
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Stores two signals and computes their circular convolution.
    """

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 480 |
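# Cross-check sketch for the circular convolution above using the FFT identity
# circular_conv(x, h) = IFFT(FFT(x) * FFT(h)) (illustrative).
import numpy as np

x = [2, 1, 2, -1]
h = [1, 2, 3, 4]
via_fft = np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)))
print([round(v, 2) for v in via_fft])  # [10.0, 10.0, 6.0, 14.0]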
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)
def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f"""{solution() = }""") | 90 | 0 |
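# Equivalent stdlib-based sketch for checking the solution above
# (illustrative; lcm(1..10) is known to be 2520).
from functools import reduce
from math import gcd

def lcm_range(n: int) -> int:
    return reduce(lambda a, b: a * b // gcd(a, b), range(1, n + 1), 1)

assert lcm_range(10) == 2520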
"""simple docstring"""
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
__A : Dict = input("Enter numbers separated by a comma:\n").strip()
__A : Tuple = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))
| 656 |
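# Quick illustrative check for patience_sort above against the built-in sort.
data = [5, 3, 1, 4, 2]
assert patience_sort(data) == sorted([5, 3, 1, 4, 2])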
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(['''LOCAL_MACHINE''', '''AMAZON_SAGEMAKER'''][value])
def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(['''NO''', '''MULTI_CPU''', '''MULTI_XPU''', '''MULTI_GPU''', '''MULTI_NPU''', '''TPU'''][value])
def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(['''no''', '''fp16''', '''bf16''', '''fp8'''][value])
def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(['''NO''', '''DATA_PARALLEL''', '''MODEL_PARALLEL'''][value])
def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace('''<command> [<args>] ''', '''''')
return usage | 90 | 0 |
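# Standalone sketch of the prompt-and-convert pattern implemented by _ask_field
# above (no accelerate dependency; the helper name is illustrative).
def ask_once(raw: str, convert_value=None, default=None):
    if default is not None and len(raw) == 0:
        return default
    return convert_value(raw) if convert_value is not None else raw

assert ask_once("", default="no") == "no"
assert ask_once("yes", convert_value=lambda v: {"yes": True, "no": False}[v.lower()]) is True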
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0,
        drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False,
        use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02,
        layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 68 |
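# Usage sketch (assumes a `transformers` version with FocalNet support is
# installed): stage names are derived from the number of depths.
from transformers import FocalNetConfig

config = FocalNetConfig(depths=[2, 2], out_features=["stage1", "stage2"])
print(config.stage_names)  # ['stem', 'stage1', 'stage2']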
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """
    Output of VQModel encoding method.
    """

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32,
        num_vq_embeddings: int = 2_56, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215, norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == '''spatial''' else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec) | 90 | 0 |
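# Toy sketch of the vector-quantization step VQModel delegates to: each latent
# vector is snapped to its nearest codebook entry (illustrative, plain torch).
import torch

codebook = torch.tensor([[0.0, 0.0], [1.0, 1.0]])   # 2 codes, dim 2
latents = torch.tensor([[0.1, -0.2], [0.9, 1.2]])   # 2 latent vectors
distances = torch.cdist(latents, codebook)          # pairwise L2 distances
indices = distances.argmin(dim=1)
quantized = codebook[indices]
print(indices.tolist())  # [0, 1]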
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hexadecimal representation
    # and collapse the result into one string.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 440 |
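# Illustrative round-trip check for the helpers above, plus the stdlib
# equivalent from the base64 module.
import base64

payload = b"Hello"
encoded = base16_encode(payload)
assert encoded == base64.b16encode(payload).decode()
assert base16_decode(encoded) == payload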
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None
return None
def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=''' ''')
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''') | 90 | 0 |
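# Small illustrative check: a solved grid leaves no empty location, so the
# solver returns it unchanged (uses the names from the rewritten snippet).
solved = sudoku([row[:] for row in initial_grid])
assert solved is not None
assert find_empty_location(solved) is None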
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True
    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 507 |
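A minimal sketch of the padding-aware position-id scheme the two position-id tests above exercise. It is consistent with their expected tensors, though it is an illustration rather than necessarily the exact transformers implementation: non-padding tokens get consecutive positions starting at padding_idx + 1, and padding tokens keep padding_idx.

import torch

def create_position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # Illustrative sketch, not the library source: 1 marks real tokens, 0 marks padding.
    mask = input_ids.ne(padding_idx).int()
    # Running count of real tokens, zeroed out again at padding positions.
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

print(create_position_ids_from_input_ids(torch.as_tensor([[12, 31, 13, 1]]), padding_idx=1))
# tensor([[2, 3, 4, 1]]) -- 0/1/2 + padding_idx + 1 for real tokens, padding_idx for the pad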
"""Project Euler problem 25: find the index of the first Fibonacci number with n digits."""


def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip()))) | 90 | 0 |
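The snippet above recomputes the full Fibonacci sequence for every candidate index, which is quadratic overall. A single-pass variant (an illustrative sketch under a hypothetical name, not part of the original snippet) keeps only the last two values:

def fibonacci_digits_index_fast(n: int) -> int:
    # Hypothetical helper: a and b hold consecutive Fibonacci numbers; index is the position of b.
    a, b, index = 0, 1, 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert fibonacci_digits_index_fast(3) == 12  # F(12) = 144 is the first 3-digit Fibonacci number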
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            config = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            AutoFeatureExtractor.from_pretrained("hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 693 |
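A minimal sketch of the registration pattern the last two tests rely on. The CustomConfig and CustomFeatureExtractor classes below stand in for the test-module fixtures and are assumptions, but AutoConfig.register and AutoFeatureExtractor.register are the actual entry points being exercised:

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig, Wav2Vec2FeatureExtractor

class CustomConfig(PretrainedConfig):
    model_type = "custom"  # key the auto classes use to resolve this config

class CustomFeatureExtractor(Wav2Vec2FeatureExtractor):
    # Stand-in for the real test fixture; any feature-extractor subclass works here.
    pass

AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# From here on, AutoFeatureExtractor.from_pretrained(...) resolves checkpoints whose
# config is a CustomConfig to CustomFeatureExtractor.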
"""Quickselect: pick the kth smallest element of a list in expected linear time."""

from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 90 | 0 |
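Usage sketch for the quickselect above: kth_number returns the k-th smallest element (1-indexed) in expected linear time. Note that the partition keeps only elements strictly smaller or strictly larger than the pivot, so the function assumes distinct values:

print(kth_number([2, 1, 3, 4, 5], 3))  # 3
print(kth_number([10, 5], 1))          # 5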