'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal (x0 <= x1 and y0 <= y1) by swapping out-of-order coordinates
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """simple docstring"""
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """simple docstring"""
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
        return True

    def setUp(self):
        """simple docstring"""
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, )
        return inputs_dict
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='''pt''').pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), )
        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
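# A minimal, hedged sketch of the bbox-legality constraint enforced in
# prepare_config_and_inputs above, vectorized with torch.sort instead of the
# per-element Python loops; boxes are assumed to be laid out as (x0, y0, x1, y1).
import torch

def make_bboxes_legal(bbox: torch.Tensor) -> torch.Tensor:
    x = torch.sort(bbox[..., [0, 2]], dim=-1).values  # ensure x0 <= x1
    y = torch.sort(bbox[..., [1, 3]], dim=-1).values  # ensure y0 <= y1
    return torch.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], dim=-1)

boxes = torch.randint(0, 1_000, (2, 7, 4))  # same shape family as ids_tensor(..., range_bbox)
legal = make_bboxes_legal(boxes)
assert bool((legal[..., 2] >= legal[..., 0]).all()) and bool((legal[..., 3] >= legal[..., 1]).all())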
'''simple docstring'''
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')
    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
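# A quick usage sketch of fizz_buzz as defined above: 15 rounds starting from 1
# reproduce the familiar sequence, with "FizzBuzz" at multiples of 15.
assert fizz_buzz(1, 15).split() == [
    "1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz",
    "Buzz", "11", "Fizz", "13", "14", "FizzBuzz",
]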
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A__ ( UpperCamelCase , UpperCamelCase ):
assert isinstance(UpperCamelCase , UpperCamelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A = JsonDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_json_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = features.copy() if features else default_expected_features
A = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = JsonDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_json_dataset(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = tmp_path / "cache"
A = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A = features.copy() if features else default_expected_features
A = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = JsonDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
assert isinstance(UpperCamelCase , UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def A__ ( UpperCamelCase , UpperCamelCase ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
A = features.copy()
A = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = tmp_path / "cache"
A = JsonDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
assert isinstance(UpperCamelCase , UpperCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = JsonDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read()
_check_json_dataset(UpperCamelCase , UpperCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
if issubclass(UpperCamelCase , UpperCamelCase ):
A = jsonl_path
elif issubclass(UpperCamelCase , UpperCamelCase ):
A = [jsonl_path]
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = JsonDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_json_dataset(UpperCamelCase , UpperCamelCase )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ):
assert isinstance(UpperCamelCase , UpperCamelCase )
for split in splits:
A = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A = JsonDatasetReader({"train": jsonl_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read()
_check_json_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = features.copy() if features else default_expected_features
A = (
Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = JsonDatasetReader({"train": jsonl_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_json_datasetdict(UpperCamelCase , UpperCamelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
if split:
A = {split: jsonl_path}
else:
A = "train"
A = {"train": jsonl_path, "test": jsonl_path}
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = JsonDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read()
_check_json_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def A__ ( UpperCamelCase ):
return json.load(UpperCamelCase )
def A__ ( UpperCamelCase ):
return [json.loads(UpperCamelCase ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def lowerCamelCase ( self :Dict , __UpperCamelCase :List[Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase ).write()
buffer.seek(0 )
A = load_json_function(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert isinstance(exported_content[0] , __UpperCamelCase )
assert len(__UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def lowerCamelCase ( self :str , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[int] , __UpperCamelCase :Dict , __UpperCamelCase :str , __UpperCamelCase :str ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , orient=__UpperCamelCase ).write()
buffer.seek(0 )
A = load_json(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__UpperCamelCase ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def lowerCamelCase ( self :Any , __UpperCamelCase :Tuple , __UpperCamelCase :str , __UpperCamelCase :int ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
A = load_json_function(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
assert isinstance(exported_content[0] , __UpperCamelCase )
assert len(__UpperCamelCase ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def lowerCamelCase ( self :List[str] , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :List[Any] , __UpperCamelCase :Optional[Any] , __UpperCamelCase :List[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , lines=__UpperCamelCase , orient=__UpperCamelCase , num_proc=2 ).write()
buffer.seek(0 )
A = load_json(__UpperCamelCase )
assert isinstance(__UpperCamelCase , __UpperCamelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__UpperCamelCase , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__UpperCamelCase ) == 10
def lowerCamelCase ( self :int , __UpperCamelCase :Tuple ):
with pytest.raises(__UpperCamelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Any , __UpperCamelCase :int , __UpperCamelCase :str , __UpperCamelCase :List[Any] , __UpperCamelCase :Union[str, Any] ):
A = tmp_path_factory.mktemp("data" ) / f"test.json.{extension}"
A = str(shared_datadir / f"test_file.json.{extension}" )
JsonDatasetWriter(__UpperCamelCase , __UpperCamelCase , compression=__UpperCamelCase ).write()
with fsspec.open(__UpperCamelCase , "rb" , compression="infer" ) as f:
A = f.read()
with fsspec.open(__UpperCamelCase , "rb" , compression="infer" ) as f:
A = f.read()
assert exported_content == original_content
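# A compact, hedged round-trip sketch of the reader/writer pair exercised above
# (assumes a writable tmp_dir; the jsonl fixture paths used by the tests are
# supplied by pytest and are not reproduced here).
from datasets import Dataset
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

def json_roundtrip(tmp_dir: str) -> None:
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    path = f"{tmp_dir}/data.jsonl"
    JsonDatasetWriter(ds, path, lines=True).write()  # one JSON object per line
    reloaded = JsonDatasetReader(path, cache_dir=tmp_dir).read()
    assert reloaded.column_names == ["col_1", "col_2", "col_3"]
    assert reloaded.num_rows == 2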
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case : Union[str, Any] = logging.get_logger(__name__)
def A__ ( UpperCamelCase , UpperCamelCase ):
A = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def A__ ( UpperCamelCase , UpperCamelCase ):
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
A = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
A = in_proj_weight[
: encoder_config.hidden_size, :
]
A = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
A = in_proj_weight[
-encoder_config.hidden_size :, :
]
def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A = dct.pop(UpperCamelCase )
A = val
def A__ ( UpperCamelCase ):
if "handwritten" in checkpoint_url:
A = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
A = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
A = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ).convert("RGB" )
return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
A = ViTConfig(image_size=384 , qkv_bias=UpperCamelCase )
A = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
A = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
A = 1_024
A = 4_096
A = 24
A = 16
A = 1_024
else:
raise ValueError("Should either find 'base' or 'large' in checkpoint URL" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
A = False
A = "relu"
A = 1_024
A = True
A = False
A = False
# load HuggingFace model
A = ViTModel(UpperCamelCase , add_pooling_layer=UpperCamelCase )
A = TrOCRForCausalLM(UpperCamelCase )
A = VisionEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase )
model.eval()
# load state_dict of original model, rename some keys
A = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location="cpu" , check_hash=UpperCamelCase )["model"]
A = create_rename_keys(UpperCamelCase , UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
A = state_dict.pop(UpperCamelCase )
if key.startswith("decoder" ) and "output_projection" not in key:
A = val
else:
A = val
# load state dict
model.load_state_dict(UpperCamelCase )
# Check outputs on an image
A = ViTImageProcessor(size=encoder_config.image_size )
A = RobertaTokenizer.from_pretrained("roberta-large" )
A = TrOCRProcessor(UpperCamelCase , UpperCamelCase )
A = processor(images=prepare_img(UpperCamelCase ) , return_tensors="pt" ).pixel_values
# verify logits
A = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
A = model(pixel_values=UpperCamelCase , decoder_input_ids=UpperCamelCase )
A = outputs.logits
A = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
A = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , UpperCamelCase , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
        type=str,
        help='URL to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
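# A hedged end-to-end sketch of using a converted checkpoint; the hub id below is
# the published TrOCR model (swap in pytorch_dump_folder_path to exercise a local
# conversion), and "line.png" is a hypothetical image of a handwritten text line.
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
pixel_values = processor(images=Image.open("line.png").convert("RGB"), return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])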
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _A :
'''simple docstring'''
@staticmethod
def snake_case_ ( *SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Tuple = MODEL_FOR_OBJECT_DETECTION_MAPPING
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Any = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ ,image_processor=SCREAMING_SNAKE_CASE_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : Dict = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ,threshold=0.0 )
self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) ,0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,{
"""score""": ANY(SCREAMING_SNAKE_CASE_ ),
"""label""": ANY(SCREAMING_SNAKE_CASE_ ),
"""box""": {"""xmin""": ANY(SCREAMING_SNAKE_CASE_ ), """ymin""": ANY(SCREAMING_SNAKE_CASE_ ), """xmax""": ANY(SCREAMING_SNAKE_CASE_ ), """ymax""": ANY(SCREAMING_SNAKE_CASE_ )},
} ,)
import datasets
snake_case : Any = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" ,"""image""" ,split="""test""" )
snake_case : Any = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
snake_case : Union[str, Any] = object_detector(SCREAMING_SNAKE_CASE_ ,threshold=0.0 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) ,len(SCREAMING_SNAKE_CASE_ ) )
for outputs in batch_outputs:
self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) ,0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE_ ,{
"""score""": ANY(SCREAMING_SNAKE_CASE_ ),
"""label""": ANY(SCREAMING_SNAKE_CASE_ ),
"""box""": {"""xmin""": ANY(SCREAMING_SNAKE_CASE_ ), """ymin""": ANY(SCREAMING_SNAKE_CASE_ ), """xmax""": ANY(SCREAMING_SNAKE_CASE_ ), """ymax""": ANY(SCREAMING_SNAKE_CASE_ )},
} ,)
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def snake_case_ ( self ):
'''simple docstring'''
pass
@require_torch
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = """hf-internal-testing/tiny-detr-mobilenetsv3"""
snake_case : Optional[int] = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case : str = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ )
snake_case : List[str] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,threshold=0.0 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] ,)
snake_case : Optional[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] ,threshold=0.0 ,)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[
[
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.33_76, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] ,)
@require_torch
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[int] = """facebook/detr-resnet-50"""
snake_case : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case : Any = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case : Union[str, Any] = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ ,feature_extractor=SCREAMING_SNAKE_CASE_ )
snake_case : Optional[Any] = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] ,)
snake_case : str = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] ,)
@require_torch
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : str = """facebook/detr-resnet-50"""
snake_case : str = pipeline("""object-detection""" ,model=SCREAMING_SNAKE_CASE_ )
snake_case : Any = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] ,)
snake_case : List[Any] = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.99_82, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.99_60, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.99_55, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] ,)
@require_torch
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Optional[Any] = 0.99_85
snake_case : List[Any] = """facebook/detr-resnet-50"""
snake_case : Any = pipeline("""object-detection""" ,model=SCREAMING_SNAKE_CASE_ )
snake_case : Tuple = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" ,threshold=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[
{"""score""": 0.99_88, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.99_87, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] ,)
@require_torch
@require_pytesseract
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : int = """Narsil/layoutlmv3-finetuned-funsd"""
snake_case : Dict = 0.99_93
snake_case : Any = pipeline("""object-detection""" ,model=SCREAMING_SNAKE_CASE_ ,threshold=SCREAMING_SNAKE_CASE_ )
snake_case : Dict = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ,decimals=4 ) ,[
{"""score""": 0.99_93, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.99_93, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] ,)
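# Condensed, hedged version of what the slow tests above assert: the high-level
# pipeline API with an explicit confidence threshold.
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9985)
# Each prediction is {"score": float, "label": str, "box": {"xmin", "ymin", "xmax", "ymax"}}
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 4), prediction["box"])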
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_snake_case : Any = logging.get_logger(__name__)
_snake_case : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_snake_case : Optional[Any] = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
_snake_case : str = {
'yjernite/retribert-base-uncased': 512,
}
_snake_case : Optional[int] = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
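# A small worked illustration of the two helper methods above (ids are
# hypothetical; real ones come from the vocab): BERT-style special-token layout
# and segment ids for a sequence pair.
token_ids_a = [11, 12]
token_ids_b = [21, 22, 23]
cls_id, sep_id = 101, 102
input_ids = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]  # [CLS] A [SEP] B [SEP]
token_type_ids = [0] * (len(token_ids_a) + 2) + [1] * (len(token_ids_b) + 1)
assert len(input_ids) == len(token_type_ids) == 8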
"""simple docstring"""
from collections import deque
def tarjan(g):
    """simple docstring"""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n_vertices, edges):
    """simple docstring"""
    g = [[] for _ in range(n_vertices)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
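    # One more hedged sanity check: two 2-cycles plus an isolated vertex collapse
    # into three strongly connected components.
    g_extra = create_graph(5, [(0, 1), (1, 0), (2, 3), (3, 2)])
    assert sorted(sorted(c) for c in tarjan(g_extra)) == [[0, 1], [2, 3], [4]]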
"""simple docstring"""
def prefix_function(input_string: str) -> list:
    """simple docstring"""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    """simple docstring"""
    return max(prefix_function(input_string))
if __name__ == "__main__":
import doctest
doctest.testmod()
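# Worked example for the two helpers above: for "aabaaab" the prefix function is
# [0, 1, 0, 1, 2, 2, 3], so the longest border over all prefixes has length 3.
assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
assert longest_prefix("aabaaab") == 3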
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F"""{solution() = }""") | 237 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    """simple docstring"""
    days = {
        '''0''': '''Sunday''',
        '''1''': '''Monday''',
        '''2''': '''Tuesday''',
        '''3''': '''Wednesday''',
        '''4''': '''Thursday''',
        '''5''': '''Friday''',
        '''6''': '''Saturday''',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('''Must be 10 characters long''')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('''Month must be between 1 - 12''')
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('''Date must be between 1 - 31''')
    # Get second separator
    sep_b = date_input[5]
    # Validate
    if sep_b not in ["-", "/"]:
        raise ValueError('''Date separator must be \'-\' or \'/\'''')
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            '''Year out of range. There has to be some sort of limit...right?''')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('''The date was evaluated incorrectly. Contact developer.''')
    # Response
    response = F"""Your date {date_input}, is a {days[str(f)]}!"""
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            '''Find out what day of the week nearly any date is or was. Enter '''
            '''date as a string in the mm-dd-yyyy or mm/dd/yyyy format'''
        )
    )
    parser.add_argument(
        '''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)'''
    )
    args = parser.parse_args()
zeller(args.date_input)
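# Hedged worked example: 31 January 2010 fell on a Sunday (zeller() additionally
# self-validates its arithmetic against datetime.date before answering).
assert zeller('''01-31-2010''') == '''Your date 01-31-2010, is a Sunday!'''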
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = '''vit'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Union[str, Any]=3072 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Union[str, Any]=1E-12 , UpperCamelCase__ : Union[str, Any]=224 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[int]=16 , **UpperCamelCase__ : Optional[int] , ) -> Tuple:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =initializer_range
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =qkv_bias
__UpperCamelCase =encoder_stride
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = version.parse('''1.11''' )
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCAmelCase_ ( self : str ) -> float:
'''simple docstring'''
return 1E-4
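
# Added usage example (not part of the original module): the defaults above
# describe ViT-Base/16, so the patch grid for a 224x224 image works out to
# 14 x 14 = 196 patches plus one CLS token.
if __name__ == "__main__":
    config = ViTConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    print(f"{config.model_type}: sequence length = {num_patches + 1}")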
| 296 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
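
# Added summary (not part of the original tests): every slow test above follows
# the same pmap data-parallel recipe, sketched here with illustrative names:
#
#   pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(...)
#   params = replicate(params)                 # copy the weights to every device
#   rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
#   prompt_ids = shard(pipeline.prepare_inputs([prompt] * jax.device_count()))
#   images = pipeline(prompt_ids, params, rngs, num_steps, jit=True).images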
| 101 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
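
# Added usage example (not part of the original module): with the defaults
# above, the channel dimension after the last stage is 96 * 2 ** (4 - 1) = 768,
# which is exactly what the derived `hidden_size` attribute exposes.
if __name__ == "__main__":
    config = Swinv2Config()
    print(f"{config.model_type}: hidden_size = {config.hidden_size}")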
| 420 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
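
# Added usage example (not part of the original module): thanks to the
# attribute_map above, generic code can read `hidden_size` even though CTRL
# stores the value under its historical name `n_embd`.
if __name__ == "__main__":
    config = CTRLConfig()
    assert config.hidden_size == config.n_embd == 1280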
| 702 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
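
# Added sketch (not part of the original module) of how a sampling loop might
# drive this scheduler; `unet_apply`, `sample`, and `key` are illustrative
# stand-ins for a real Flax UNet call and its inputs:
#
#   state = scheduler.set_timesteps(scheduler.create_state(), num_inference_steps)
#   for t in range(num_inference_steps - 1):
#       sigma = state.schedule[t]
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       model_output = unet_apply(sample_hat, sigma_hat)
#       sample, derivative, state = scheduler.step(
#           state, model_output, sigma_hat, state.schedule[t + 1], sample_hat, return_dict=False
#       )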
| 586 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute Gamma(num) as the improper integral of x^(num - 1) * e^(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")

    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
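
# Added usage example (not part of the original file): for positive integers n,
# Gamma(n) = (n - 1)!, so gamma(5.0) should be approximately 4! = 24.
def _example() -> None:
    assert abs(gamma(5.0) - 24.0) < 1e-6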
| 569 |
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    # Min-heap over Node objects with an index map, so decrease_key runs in O(log n).
    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify method
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
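
# Added example (not part of the original driver): repeatedly removing the root
# yields the nodes in ascending order of `val` -- a heap sort over Node objects.
print("Min Heap - draining in sorted order")
while not my_min_heap.is_empty():
    print(my_min_heap.remove())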
| 569 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
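
# Added usage example (not part of the original module): `extract_path_from_uri`
# strips the protocol prefix from a dataset URI and leaves local paths untouched.
#
#   >>> extract_path_from_uri("s3://my-bucket/datasets/train")
#   'my-bucket/datasets/train'
#   >>> extract_path_from_uri("relative/path/on/disk")
#   'relative/path/on/disk'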
| 208 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_rust_tokenizer(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]  # noqa: E501
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'''input_ids''': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 208 | 1 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
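
# Added note (not part of the original module): a concrete test case would mix
# this class into a pipeline test. A sketch with assumed class names:
#
#   class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
#       pipeline_class = IFPipeline
#
#       def get_dummy_components(self):
#           return self._get_dummy_components()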
| 571 |
"""simple docstring"""
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
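
# Added usage example (not part of the original file): conversions go through
# the cubic-metre base unit, so 10 litres -> 10 * 0.001 * 264.172 gallons.
def _demo_litre_to_gallon() -> None:
    assert abs(volume_conversion(10, "litre", "gallon") - 2.64172) < 1e-6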
| 571 | 1 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    # Returns everything needed to perform basic training
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
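
# Added sketch (not part of the original script): the user-facing pattern these
# tests exercise; `compute_loss` is an illustrative placeholder.
#
#   accelerator = Accelerator(gradient_accumulation_steps=2)
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       with accelerator.accumulate(model):  # grads only sync on step boundaries
#           loss = compute_loss(model, batch)
#           accelerator.backward(loss)
#           optimizer.step()
#           optimizer.zero_grad()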
| 721 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int ) -> Optional[Any]:
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase_ = basename(_lowerCamelCase )
lowerCamelCase_ = dirname(_lowerCamelCase )
lowerCamelCase_ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase_ = cls.hub_models()
lowerCamelCase_ = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase_ = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
lowerCamelCase_ = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase_ = vars(chkpt['args']['model'] )
lowerCamelCase_ = args['source_lang']
lowerCamelCase_ = args['target_lang']
lowerCamelCase_ = dirname(_lowerCamelCase )
lowerCamelCase_ = basename(_lowerCamelCase )
# dicts
lowerCamelCase_ = os.path.join(_lowerCamelCase , F'''dict.{src_lang}.txt''' )
lowerCamelCase_ = os.path.join(_lowerCamelCase , F'''dict.{tgt_lang}.txt''' )
lowerCamelCase_ = Dictionary.load(_lowerCamelCase )
lowerCamelCase_ = rewrite_dict_keys(src_dict.indices )
lowerCamelCase_ = len(_lowerCamelCase )
lowerCamelCase_ = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase_ = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase_ = False
break
lowerCamelCase_ = Dictionary.load(_lowerCamelCase )
lowerCamelCase_ = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase_ = len(_lowerCamelCase )
lowerCamelCase_ = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase_ = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase_ = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase_ = fin.read()
lowerCamelCase_ = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
fsmt_model_config_file = os.path.join(pytorch_dump_folder_path , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
lowerCamelCase_ = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase_ = 5
lowerCamelCase_ = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase_ = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase_ = 1.0
print(F'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase_ = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase_ = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
model = chkpt['models'][0]
model_state_dict = model.state_dict()
# rename keys to start with 'model.'
model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
ignore_keys = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(k , None )
config = FSMTConfig.from_pretrained(pytorch_dump_folder_path )
model_new = FSMTForConditionalGeneration(config )
# check that it loads ok
model_new.load_state_dict(model_state_dict , strict=False )
# save
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(model_state_dict , pytorch_weights_dump_path )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 137 | 0 |
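With the placeholder names restored in the dict-rewriting helper above, its behaviour is easy to check: fairseq's `@@` continuation markers become bare subwords, everything else gains the `</w>` word-final marker, and the special tokens it over-rewrote are restored. A quick demonstration (assuming the helper is in scope under the name `rewrite_dict_keys`, as at its call sites above):

print(rewrite_dict_keys({"le@@": 5, "tt@@": 6, "er": 7, "<unk>": 3}))
# -> {'le': 5, 'tt': 6, 'er</w>': 7, '<unk>': 3}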
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_A = logging.get_logger(__name__)
# TODO: upload to AWS
_A = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class _lowerCAmelCase ( PretrainedConfig ):
model_type ="retribert"
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ) -> Optional[int]:
super().__init__(pad_token_id=pad_token_id , **kwargs )
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = type_vocab_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = share_encoders
lowerCAmelCase_ = projection_dim
| 290 |
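The config class above follows the standard `PretrainedConfig` pattern: `__init__` records hyperparameters as attributes and forwards special-token ids to the base class. A minimal sketch of the same pattern; the class name, `model_type` string, and field set here are illustrative inventions, not a real model:

from transformers import PretrainedConfig


class ToyRetrieverConfig(PretrainedConfig):
    model_type = "toy-retriever"  # hypothetical identifier

    def __init__(self, vocab_size=30_522, hidden_size=768, pad_token_id=0, **kwargs):
        # special-token ids are stored by the base class
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size


config = ToyRetrieverConfig(hidden_size=256)
print(config.hidden_size, config.pad_token_id)  # 256 0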
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , parent , batch_size=1_3 , num_channels=3 , is_training=True , use_labels=True , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , image_size=2_2_4 , num_labels=1_0_0_0 , layer_depths=[3, 3, 6, 4] , embed_dims=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
self.model_tester = SwiftFormerModelTester(self )
self.config_tester = ConfigTester(
self , config_class=SwiftFormerConfig , has_text_modality=False , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 14 | 0 |
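The hidden-state assertions near the end of the test encode SwiftFormer's feature-map geometry: a stride-4 patch stem, then spatial halving after every second hidden state. The expected shapes can be reproduced with plain arithmetic; a sketch assuming the tester defaults above (`image_size=224`, `embed_dims=[48, 56, 112, 220]`, eight hidden states):

image_size = 224
embed_dims = [48, 56, 112, 220]

for i in range(8):  # two hidden states per stage, as asserted above
    channels = embed_dims[i // 2]
    side = (image_size // 4) // 2 ** (i // 2)  # stride-4 stem, then /2 per stage
    print(f"hidden_states[{i}]: (batch, {channels}, {side}, {side})")
# hidden_states[0]: (batch, 48, 56, 56) ... hidden_states[7]: (batch, 220, 7, 7)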
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(ciphertext , key ) -> str | None:
"""simple docstring"""
decoded = ""
for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
decodedchar = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(decodedchar )
return decoded
def filter_valid_chars(ciphertext ) -> list[str]:
"""simple docstring"""
possibles = []
for key in product(LOWERCASE_INTS , repeat=3 ):
encoded = try_key(ciphertext , key )
if encoded is not None:
possibles.append(encoded )
return possibles
def filter_common_word(possibles , common_word ) -> list[str]:
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def __A(lowerCAmelCase = "p059_cipher.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = Path(lowerCAmelCase ).parent.joinpath(lowerCAmelCase ).read_text(encoding="""utf-8""" )
_UpperCamelCase = [int(lowerCAmelCase ) for number in data.strip().split(""",""" )]
_UpperCamelCase = filter_valid_chars(lowerCAmelCase )
for common_word in COMMON_WORDS:
_UpperCamelCase = filter_common_word(lowerCAmelCase , lowerCAmelCase )
if len(lowerCAmelCase ) == 1:
break
_UpperCamelCase = possibles[0]
return sum(ord(lowerCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 202 |
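The cipher-cracking code above relies on XOR being its own inverse: XORing the ciphertext with the same cycled key recovers the plaintext, and any key that produces a non-printable byte can be rejected early. A toy round trip demonstrating the involution:

from itertools import cycle

plaintext = "the cat"
key = "abc"

ciphertext = [ord(c) ^ ord(k) for c, k in zip(plaintext, cycle(key))]
decoded = "".join(chr(c ^ ord(k)) for c, k in zip(ciphertext, cycle(key)))

assert decoded == plaintext  # XOR against the same cycled key is self-inverse
print(ciphertext)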
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict ) -> None:
"""simple docstring"""
ignore_keys = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(k , None )
def make_linear_from_emb(emb ):
"""simple docstring"""
vocab_size , emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
lin_layer.weight.data = emb.weight.data
return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path ):
"""simple docstring"""
checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
args = Namespace(**checkpoint["""cfg"""]["""model"""] )
state_dict = checkpoint["""model"""]
remove_ignore_keys_(state_dict )
vocab_size = state_dict["""decoder.embed_tokens.weight"""].shape[0]
state_dict = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
config = XGLMConfig(
vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
model = XGLMForCausalLM(config )
missing = model.load_state_dict(state_dict , strict=False )
print(missing )
model.lm_head = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
args = parser.parse_args()
model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 202 | 1 |
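`make_linear_from_emb` above implements weight tying: the LM head is a bias-free `nn.Linear` whose weight tensor is the embedding matrix itself, so the input and output vocabul
ary projections share storage. A standalone torch sketch of the same trick (dimensions here are arbitrary):

import torch
from torch import nn

emb = nn.Embedding(100, 16)            # (vocab_size, d_model)
vocab_size, d_model = emb.weight.shape

lm_head = nn.Linear(d_model, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # reuse the embedding matrix as the output projection

hidden = torch.randn(1, d_model)
print(lm_head(hidden).shape)  # torch.Size([1, 100])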
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _UpperCamelCase :
'''simple docstring'''
a_ : CommonSchedulerState
# setable values
a_ : jnp.ndarray
a_ : jnp.ndarray
a_ : Optional[int] = None
@classmethod
def _snake_case ( cls : List[str] , _lowerCamelCase : CommonSchedulerState , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray ):
'''simple docstring'''
return cls(common=_lowerCamelCase , init_noise_sigma=_lowerCamelCase , timesteps=_lowerCamelCase )
@dataclass
class _UpperCamelCase ( FlaxSchedulerOutput ):
'''simple docstring'''
a_ : DDPMSchedulerState
class _UpperCamelCase ( FlaxSchedulerMixin,ConfigMixin ):
'''simple docstring'''
a_ : Union[str, Any] = [e.name for e in FlaxKarrasDiffusionSchedulers]
a_ : jnp.dtype
@property
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self : Optional[int] , _lowerCamelCase : int = 1_0_0_0 , _lowerCamelCase : float = 0.0_001 , _lowerCamelCase : float = 0.02 , _lowerCamelCase : str = "linear" , _lowerCamelCase : Optional[jnp.ndarray] = None , _lowerCamelCase : str = "fixed_small" , _lowerCamelCase : bool = True , _lowerCamelCase : str = "epsilon" , _lowerCamelCase : jnp.dtype = jnp.floataa , ):
'''simple docstring'''
__lowerCamelCase : int = dtype
def _snake_case ( self : Tuple , _lowerCamelCase : Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
__lowerCamelCase : List[str] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__lowerCamelCase : str = jnp.array(1.0 , dtype=self.dtype )
__lowerCamelCase : List[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowerCamelCase , init_noise_sigma=_lowerCamelCase , timesteps=_lowerCamelCase , )
def _snake_case ( self : Optional[int] , _lowerCamelCase : DDPMSchedulerState , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : Optional[int] = None ):
'''simple docstring'''
return sample
def _snake_case ( self : int , _lowerCamelCase : DDPMSchedulerState , _lowerCamelCase : int , _lowerCamelCase : Tuple = () ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
__lowerCamelCase : int = (jnp.arange(0 , _lowerCamelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase , )
def _snake_case ( self : Union[str, Any] , _lowerCamelCase : DDPMSchedulerState , _lowerCamelCase : int , _lowerCamelCase : str=None , _lowerCamelCase : Dict=None ):
'''simple docstring'''
__lowerCamelCase : List[str] = state.common.alphas_cumprod[t]
__lowerCamelCase : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__lowerCamelCase : List[Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__lowerCamelCase : Union[str, Any] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__lowerCamelCase : List[Any] = jnp.clip(_lowerCamelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__lowerCamelCase : List[str] = jnp.log(jnp.clip(_lowerCamelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
__lowerCamelCase : List[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__lowerCamelCase : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__lowerCamelCase : Optional[Any] = variance
__lowerCamelCase : List[str] = state.common.betas[t]
__lowerCamelCase : Optional[int] = (predicted_variance + 1) / 2
__lowerCamelCase : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def _snake_case ( self : str , _lowerCamelCase : DDPMSchedulerState , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : int , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : Optional[jax.random.KeyArray] = None , _lowerCamelCase : bool = True , ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = timestep
if key is None:
__lowerCamelCase : List[Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__lowerCamelCase , __lowerCamelCase : Optional[int] = jnp.split(_lowerCamelCase , sample.shape[1] , axis=1 )
else:
__lowerCamelCase : Dict = None
# 1. compute alphas, betas
__lowerCamelCase : Union[str, Any] = state.common.alphas_cumprod[t]
__lowerCamelCase : List[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__lowerCamelCase : Optional[Any] = 1 - alpha_prod_t
__lowerCamelCase : Union[str, Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__lowerCamelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__lowerCamelCase : str = model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
""" for the FlaxDDPMScheduler.""" )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__lowerCamelCase : Optional[int] = jnp.clip(_lowerCamelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__lowerCamelCase : Optional[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCamelCase : Optional[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__lowerCamelCase : Dict = jax.random.split(_lowerCamelCase , num=1 )
__lowerCamelCase : Union[str, Any] = jax.random.normal(_lowerCamelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_lowerCamelCase , _lowerCamelCase , predicted_variance=_lowerCamelCase ) ** 0.5) * noise
__lowerCamelCase : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__lowerCamelCase : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowerCamelCase , state=_lowerCamelCase )
def _snake_case ( self : List[str] , _lowerCamelCase : DDPMSchedulerState , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def _snake_case ( self : List[Any] , _lowerCamelCase : DDPMSchedulerState , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , _lowerCamelCase : jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __len__( self : List[Any] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 519 |
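The `step` method above follows Eq. (7) of the DDPM paper: the posterior mean is a weighted sum of the predicted x0 and the current sample xt. A numpy sketch of those two coefficients, assuming the linear beta schedule matching the config defaults above (`beta_start=0.0001`, `beta_end=0.02`, 1000 steps):

import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)  # linear schedule
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)

t = 500
alpha_prod_t = alphas_cumprod[t]
alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
beta_prod_t = 1.0 - alpha_prod_t
beta_prod_t_prev = 1.0 - alpha_prod_t_prev

# Eq. (7) of https://arxiv.org/pdf/2006.11239.pdf, mirroring the step() above
pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * betas[t]) / beta_prod_t
current_sample_coeff = alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
print(f"{pred_original_sample_coeff:.5f} {current_sample_coeff:.5f}")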
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6378137
def _UpperCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float ):
"""simple docstring"""
__lowerCamelCase : List[Any] = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__lowerCamelCase : Tuple = atan((1 - flattening) * tan(radians(UpperCAmelCase ) ) )
__lowerCamelCase : Dict = atan((1 - flattening) * tan(radians(UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__lowerCamelCase : Any = haversine_distance(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__lowerCamelCase : Dict = (b_lata + b_lata) / 2
__lowerCamelCase : Union[str, Any] = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__lowerCamelCase : str = (sin(UpperCAmelCase ) ** 2) * (cos(UpperCAmelCase ) ** 2)
__lowerCamelCase : List[Any] = cos(sigma / 2 ) ** 2
__lowerCamelCase : Dict = (sigma - sin(UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__lowerCamelCase : Tuple = (cos(UpperCAmelCase ) ** 2) * (sin(UpperCAmelCase ) ** 2)
__lowerCamelCase : List[str] = sin(sigma / 2 ) ** 2
__lowerCamelCase : List[str] = (sigma + sin(UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 519 | 1 |
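Lambert's formula begins by converting each geodetic latitude to a parametric (reduced) latitude, beta = atan((1 - f) * tan(phi)), exactly as in the first lines of the function above. A quick standalone check of that conversion using the WGS-84 constants:

from math import atan, degrees, radians, tan

AXIS_A = 6_378_137.0        # WGS-84 semi-major axis, metres
AXIS_B = 6_356_752.314_245  # WGS-84 semi-minor axis, metres
flattening = (AXIS_A - AXIS_B) / AXIS_A

lat_deg = 45.0
b_lat = atan((1 - flattening) * tan(radians(lat_deg)))
print(degrees(b_lat))  # ~44.9 degrees, slightly below the geodetic latitude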
'''simple docstring'''
def _lowerCAmelCase ( num )-> bool:
if num < 0:
return False
num_copy = num
rev_num = 0
while num > 0:
rev_num = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 617 |
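With the parameter and local names restored above, the digit-reversal palindrome check runs as-is in O(log10 n) time: it peels the last digit with `% 10` and pushes it onto a reversed accumulator. A quick smoke test, calling the function under the mangled name the source kept for it:

for n in (0, 7, 12_321, 123, -121):
    print(n, _lowerCAmelCase(n))
# 0 True / 7 True / 12321 True / 123 False / -121 False (negatives are rejected up front)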
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase ( electron_conc , hole_conc , intrinsic_conc , )-> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 617 | 1 |
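The function above is the mass-action law n * p = n_i**2 solved for whichever quantity was passed as zero. With its parameters restored, a quick numeric check (again via the mangled function name kept by the source):

print(_lowerCAmelCase(25.0, 100.0, 0))  # ('intrinsic_conc', 50.0), since sqrt(25 * 100) = 50
print(_lowerCAmelCase(0, 100.0, 50.0))  # ('electron_conc', 25.0), since 50**2 / 100 = 25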
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_bloom_fast'] = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_bloom'] = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 621 |
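`_LazyModule` above defers the heavy torch imports until an attribute is actually accessed; `_import_structure` is its manifest mapping submodules to the names they provide. A minimal sketch of the deferred-import idea — an illustration of the pattern, not the library's actual implementation:

import importlib


class LazyModule:
    """Resolve attributes on first access instead of importing submodules eagerly."""

    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # invert the manifest: attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        try:
            module_name = self._attr_to_module[attr]
        except KeyError:
            raise AttributeError(attr) from None
        module = importlib.import_module(f"{self._package}.{module_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ fires only once per name
        return value


# usage sketch: LazyModule("transformers.models.bloom", {"configuration_bloom": ["BloomConfig"]})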
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_detr_config( model_name ):
# initialize config
if "resnet-50" in model_name:
_SCREAMING_SNAKE_CASE : Optional[int] = ResNetConfig.from_pretrained('microsoft/resnet-50' )
elif "resnet-101" in model_name:
_SCREAMING_SNAKE_CASE : Optional[Any] = ResNetConfig.from_pretrained('microsoft/resnet-101' )
else:
raise ValueError('Model name should include either resnet50 or resnet101' )
_SCREAMING_SNAKE_CASE : Optional[Any] = DetrConfig(use_timm_backbone=lowerCamelCase, backbone_config=lowerCamelCase )
# set label attributes
_SCREAMING_SNAKE_CASE : Optional[int] = 'panoptic' in model_name
if is_panoptic:
_SCREAMING_SNAKE_CASE : List[str] = 250
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = 91
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'huggingface/label-files'
_SCREAMING_SNAKE_CASE : Dict = 'coco-detection-id2label.json'
_SCREAMING_SNAKE_CASE : List[str] = json.load(open(hf_hub_download(lowerCamelCase, lowerCamelCase, repo_type='dataset' ), 'r' ) )
_SCREAMING_SNAKE_CASE : Dict = {int(lowerCamelCase ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE : List[Any] = idalabel
_SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def create_rename_keys( config ):
# here we list all keys to be renamed (original name on the left, our name on the right)
_SCREAMING_SNAKE_CASE : Optional[int] = []
# stem
# fmt: off
rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') )
rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') )
rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') )
rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') )
rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
] )
return rename_keys
def rename_key( state_dict, old, new ):
val = state_dict.pop(old )
state_dict[new] = val
def read_in_q_k_v( state_dict, is_panoptic=False ):
_SCREAMING_SNAKE_CASE : Optional[int] = ''
if is_panoptic:
_SCREAMING_SNAKE_CASE : List[Any] = 'detr.'
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_SCREAMING_SNAKE_CASE : Dict = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE : Any = in_proj_weight[:256, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias[:256]
_SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[256:512, :]
_SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[256:512]
_SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[-256:, :]
_SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
_SCREAMING_SNAKE_CASE : int = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[:256, :]
_SCREAMING_SNAKE_CASE : str = in_proj_bias[:256]
_SCREAMING_SNAKE_CASE : str = in_proj_weight[256:512, :]
_SCREAMING_SNAKE_CASE : int = in_proj_bias[256:512]
_SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[-256:, :]
_SCREAMING_SNAKE_CASE : List[str] = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_SCREAMING_SNAKE_CASE : str = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
_SCREAMING_SNAKE_CASE : List[Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight_cross_attn[:256, :]
_SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias_cross_attn[:256]
_SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight_cross_attn[256:512, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias_cross_attn[256:512]
_SCREAMING_SNAKE_CASE : int = in_proj_weight_cross_attn[-256:, :]
_SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_bias_cross_attn[-256:]
def prepare_img( ):
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
im = Image.open(requests.get(url, stream=True ).raw )
return im
@torch.no_grad()
def convert_detr_checkpoint( model_name, pytorch_dump_folder_path=None, push_to_hub=False ):
config , is_panoptic = get_detr_config(model_name )
# load original model from torch hub
_SCREAMING_SNAKE_CASE : Optional[Any] = {
'detr-resnet-50': 'detr_resnet50',
'detr-resnet-101': 'detr_resnet101',
}
logger.info(f"""Converting model {model_name}...""" )
_SCREAMING_SNAKE_CASE : int = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=lowerCamelCase ).eval()
_SCREAMING_SNAKE_CASE : Optional[Any] = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(lowerCamelCase ):
if is_panoptic:
_SCREAMING_SNAKE_CASE : int = 'detr.' + src
rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(lowerCamelCase, is_panoptic=lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_SCREAMING_SNAKE_CASE : str = 'detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
_SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
_SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
_SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = val
# finally, create HuggingFace model and load state dict
_SCREAMING_SNAKE_CASE : Dict = DetrForSegmentation(lowerCamelCase ) if is_panoptic else DetrForObjectDetection(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# verify our conversion on an image
_SCREAMING_SNAKE_CASE : Optional[Any] = 'coco_panoptic' if is_panoptic else 'coco_detection'
_SCREAMING_SNAKE_CASE : int = DetrImageProcessor(format=lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = processor(images=prepare_img(), return_tensors='pt' )
_SCREAMING_SNAKE_CASE : Tuple = encoding['pixel_values']
_SCREAMING_SNAKE_CASE : List[str] = detr(lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = model(lowerCamelCase )
assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1E-3 )
assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info('Uploading PyTorch model and image processor to the hub...' )
model.push_to_hub(f"""nielsr/{model_name}""" )
processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 621 | 1 |
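`read_in_q_k_v` above exploits how PyTorch's `nn.MultiheadAttention` stores the query, key and value projections stacked in a single `in_proj_weight` of shape `(3*d, d)`; slicing it into thirds (d = 256 for DETR, hence the 256-based indices) recovers the separate matrices. A torch sketch of that split:

import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)  # stacked [q; k; v], as in nn.MultiheadAttention
in_proj_bias = torch.randn(3 * d)

q_w, k_w, v_w = in_proj_weight[:d], in_proj_weight[d : 2 * d], in_proj_weight[-d:]
q_b, k_b, v_b = in_proj_bias[:d], in_proj_bias[d : 2 * d], in_proj_bias[-d:]

assert q_w.shape == k_w.shape == v_w.shape == (d, d)
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)  # lossless split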
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
model_type ='''efficientformer'''
def __init__( self , depths : List[int] = [3, 2, 6, 4] , hidden_sizes : List[int] = [48, 96, 224, 448] , downsamples : List[bool] = [True, True, True, True] , dim : int = 448 , key_dim : int = 32 , attention_ratio : int = 4 , resolution : int = 7 , num_hidden_layers : int = 5 , num_attention_heads : int = 8 , mlp_expansion_ratio : int = 4 , hidden_dropout_prob : float = 0.0 , patch_size : int = 16 , num_channels : int = 3 , pool_size : int = 3 , downsample_patch_size : int = 3 , downsample_stride : int = 2 , downsample_pad : int = 1 , drop_path_rate : float = 0.0 , num_meta3d_blocks : int = 1 , distillation : bool = True , use_layer_scale : bool = True , layer_scale_init_value : float = 1E-5 , hidden_act : str = "gelu" , initializer_range : float = 0.02 , layer_norm_eps : float = 1E-1_2 , image_size : int = 224 , batch_norm_eps : float = 1E-0_5 , **kwargs : Union[str, Any] , ):
super().__init__(**kwargs )
__lowercase : List[Any] = hidden_act
__lowercase : Tuple = hidden_dropout_prob
__lowercase : List[str] = hidden_sizes
__lowercase : List[Any] = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : Union[str, Any] = initializer_range
__lowercase : Any = layer_norm_eps
__lowercase : int = patch_size
__lowercase : Any = num_channels
__lowercase : int = depths
__lowercase : List[Any] = mlp_expansion_ratio
__lowercase : str = downsamples
__lowercase : List[str] = dim
__lowercase : Any = key_dim
__lowercase : str = attention_ratio
__lowercase : Union[str, Any] = resolution
__lowercase : Optional[int] = pool_size
__lowercase : List[Any] = downsample_patch_size
__lowercase : Tuple = downsample_stride
__lowercase : List[str] = downsample_pad
__lowercase : Union[str, Any] = drop_path_rate
__lowercase : Union[str, Any] = num_metaad_blocks
__lowercase : Tuple = distillation
__lowercase : Union[str, Any] = use_layer_scale
__lowercase : List[str] = layer_scale_init_value
__lowercase : Any = image_size
__lowercase : Union[str, Any] = batch_norm_eps
| 284 |
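The loop exercised by the scheduler tests that follow is the canonical DDPM sampling recipe: predict the noise residual, then let the scheduler step the sample back one timestep. A hedged sketch with the public diffusers API, using random tensors in place of a trained model:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)  # a short inference schedule
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)

for t in scheduler.timesteps:
    residual = torch.randn_like(sample)  # stand-in for a trained model's noise prediction
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])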
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( SchedulerCommonTest ):
"""simple docstring"""
scheduler_classes = (DDPMScheduler,)
def snake_case_ ( self : Dict , **_snake_case : List[str] ):
config = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**_snake_case )
return config
def snake_case_ ( self : Union[str, Any] ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case )
def snake_case_ ( self : Dict ):
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case )
def snake_case_ ( self : Optional[Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_snake_case )
def snake_case_ ( self : Optional[int] ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_snake_case )
def snake_case_ ( self : Tuple ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_snake_case )
def snake_case_ ( self : Any ):
self.check_over_configs(thresholding=_snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , )
def snake_case_ ( self : List[str] ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case )
def snake_case_ ( self : Dict ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_snake_case )
def snake_case_ ( self : Optional[Any] ):
__lowercase : int = self.scheduler_classes[0]
__lowercase : str = self.get_scheduler_config()
__lowercase : Union[str, Any] = scheduler_class(**_snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def snake_case_ ( self : Tuple ):
__lowercase : str = self.scheduler_classes[0]
__lowercase : str = self.get_scheduler_config()
__lowercase : int = scheduler_class(**_snake_case )
__lowercase : List[Any] = len(_snake_case )
__lowercase : Dict = self.dummy_model()
__lowercase : Tuple = self.dummy_sample_deter
__lowercase : List[str] = torch.manual_seed(0 )
for t in reversed(range(_snake_case ) ):
# 1. predict noise residual
__lowercase : Optional[int] = model(_snake_case , _snake_case )
# 2. predict previous mean of sample x_t-1
__lowercase : int = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase : Any = pred_prev_sample
__lowercase : str = torch.sum(torch.abs(_snake_case ) )
__lowercase : Any = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1E-2
assert abs(result_mean.item() - 0.33_72 ) < 1E-3
def snake_case_ ( self : Tuple ):
__lowercase : str = self.scheduler_classes[0]
__lowercase : str = self.get_scheduler_config(prediction_type='''v_prediction''' )
__lowercase : Union[str, Any] = scheduler_class(**_snake_case )
__lowercase : List[Any] = len(_snake_case )
__lowercase : List[Any] = self.dummy_model()
__lowercase : str = self.dummy_sample_deter
__lowercase : Dict = torch.manual_seed(0 )
for t in reversed(range(_snake_case ) ):
# 1. predict noise residual
__lowercase : Tuple = model(_snake_case , _snake_case )
# 2. predict previous mean of sample x_t-1
__lowercase : Optional[int] = scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__lowercase : Any = pred_prev_sample
__lowercase : int = torch.sum(torch.abs(_snake_case ) )
__lowercase : List[Any] = torch.mean(torch.abs(_snake_case ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1E-2
assert abs(result_mean.item() - 0.26_31 ) < 1E-3
def snake_case_ ( self : str ):
__lowercase : int = self.scheduler_classes[0]
__lowercase : List[str] = self.get_scheduler_config()
__lowercase : List[str] = scheduler_class(**_snake_case )
__lowercase : Optional[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_snake_case )
__lowercase : int = scheduler.timesteps
for i, timestep in enumerate(_snake_case ):
if i == len(_snake_case ) - 1:
__lowercase : Optional[Any] = -1
else:
__lowercase : List[str] = timesteps[i + 1]
__lowercase : Optional[Any] = scheduler.previous_timestep(_snake_case )
__lowercase : str = prev_t.item()
self.assertEqual(_snake_case , _snake_case )
def snake_case_ ( self : List[Any] ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [100, 87, 50, 51, 0]
with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=timesteps )
def snake_case_ ( self : List[Any] ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [100, 87, 50, 1, 0]
num_inference_steps = len(timesteps )
with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def snake_case_ ( self : str ):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=timesteps )
| 284 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class FlaxPipelineDownloadTests(unittest.TestCase):
    def test_download_only_flax_weights(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class FlaxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def test_stable_diffusion_tiny(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
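# A hedged, self-contained sketch of the data-parallel pattern the integration
# tests above rely on: parameters are replicated across local devices, the batch
# is sharded along its leading axis, and the computation is pmapped (what
# `jit=True` does inside the pipeline). The affine map stands in for the pipeline.
if __name__ == "__main__":
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    def apply_fn(params, batch):
        # stand-in for the pipeline: one affine transformation per device
        return batch * params["scale"] + params["shift"]

    n_dev = jax.device_count()
    params = replicate({"scale": jnp.float32(2.0), "shift": jnp.float32(1.0)})
    batch = shard(jnp.ones((n_dev * 4, 3), jnp.float32))  # leading dim must divide by device count
    out = jax.pmap(apply_fn)(params, batch)
    print(out.shape)  # (n_dev, 4, 3)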
| 96 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
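# For reference, the replacement import the warning points to (commented, hedged
# example; the checkpoint name is illustrative, not mandated by the shim above):
#
#   from diffusers import StableDiffusionInpaintPipeline
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")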
| 190 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
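# Hedged sketch of what the `_LazyModule` indirection above achieves: attribute
# access triggers the real import. A minimal stand-in using module-level
# __getattr__ (PEP 562), not the actual transformers implementation (commented
# so it does not interfere with the real mechanism in this file):
#
# import importlib
#
# def __getattr__(name):
#     for module_name, exports in _import_structure.items():
#         if name in exports:
#             module = importlib.import_module(f".{module_name}", __name__)
#             return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")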
| 486 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
HIDDEN_SIZE_MAPPING = {
"""169M""": 7_68,
"""430M""": 10_24,
"""1B5""": 20_48,
"""3B""": 25_60,
"""7B""": 40_96,
"""14B""": 51_20,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
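# Hedged sanity check for convert_state_dict, runnable without any checkpoint:
# feed it a toy RWKV-style state dict and inspect the renamed keys (commented so
# it does not run alongside the CLI entry point above).
#
# import torch
# toy = {"emb.weight": torch.zeros(1), "blocks.0.att.time_mix_k": torch.zeros(1), "head.weight": torch.zeros(1)}
# print(sorted(convert_state_dict(toy)))
# # ['head.weight', 'rwkv.blocks.0.attention.time_mix_key', 'rwkv.embeddings.weight']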
| 486 | 1 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase (or PascalCase if indicated)."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
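# Quick usage check (hedged example; easy to verify by hand):
#
# assert snake_to_camel_case("some_random_string") == "someRandomString"
# assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"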
| 600 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int64, device=torch_device)  # integer dtype assumed; the original dtype name was garbled

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"
    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
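# Hedged sketch: instantiate the tiny config used by the first test class above
# and run one denoising forward pass on CPU. Only the diffusers UNet2DModel API
# already exercised in this file is assumed.
if __name__ == "__main__":
    demo_model = UNet2DModel(
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        attention_head_dim=3,
        in_channels=3,
        out_channels=3,
        layers_per_block=2,
        sample_size=32,
    )
    demo_sample = torch.randn(1, 3, 32, 32)
    demo_timestep = torch.tensor([10])
    demo_out = demo_model(demo_sample, demo_timestep).sample
    print(demo_out.shape)  # torch.Size([1, 3, 32, 32])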
| 600 | 1 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement, hint=None):
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
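# Usage sketch (hedged, commented since the outcome depends on the local
# environment): these mirror the pip-style requirement strings the parser accepts.
#
# require_version("tokenizers==0.9.4")    # exact pin
# require_version("numpy>=1.17,<2.0")     # comma-separated range
# require_version("python>=3.8")          # special-cased interpreter check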
| 711 |
'''simple docstring'''
def sum_of_series(first_term, common_diff, num_of_terms):
    # closed-form formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
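# The closed form above is the arithmetic-series identity
#   S_n = n/2 * (2a + (n - 1)d)
# Quick cross-check against an explicit sum (hedged example, commented):
#
# assert sum_of_series(1, 1, 10) == sum(1 + k * 1 for k in range(10))  # 55.0 == 55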
| 511 | 0 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__a = None
try:
import msvcrt
except ImportError:
__a = None
try:
import fcntl
except ImportError:
__a = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__a = OSError
# Data
# ------------------------------------------------
__a = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__a = "3.0.12"
__a = None
def logger() -> logging.Logger:
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by BaseFileLock.acquire()."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 374 |
'''simple docstring'''
import sys
__a = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 374 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path)
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)")

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path)
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
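# Hedged sketch of the voice-preset layout the tests above exercise: three numpy
# arrays saved to .npz, which BarkProcessor accepts as a history prompt (commented
# because it writes a file; array shapes mirror the fixtures above):
#
# preset = {
#     "semantic_prompt": np.ones(35),
#     "coarse_prompt": np.ones((2, 35)),
#     "fine_prompt": np.ones((8, 35)),
# }
# np.savez("my_voice.npz", **preset)
# # inputs = processor(text="Hello", voice_preset="my_voice.npz")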
| 712 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(__UpperCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt=1,
        num_inference_steps=25,
        generator=None,
        latents=None,
        guidance_scale=4.0,
        frame_size=64,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image_out = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image_out)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
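# Hedged sketch, isolated from the pipeline above: the classifier-free guidance
# update applied inside the denoising loop, written as a standalone torch helper.
def _classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks [unconditional, conditional] predictions along the batch axis
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)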
| 290 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
"""simple docstring"""
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) -> None:
        """simple docstring"""
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) -> None:
        """simple docstring"""
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NystromformerModel,
'fill-mask': NystromformerForMaskedLM,
'question-answering': NystromformerForQuestionAnswering,
'text-classification': NystromformerForSequenceClassification,
'token-classification': NystromformerForTokenClassification,
'zero-shot': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp(self) -> None:
        """simple docstring"""
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self) -> None:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self) -> None:
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self) -> None:
        """simple docstring"""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_inference_no_head(self) -> None:
        """simple docstring"""
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_masked_lm_end_to_end(self) -> None:
        """simple docstring"""
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 98 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024; the tiny GPT-2 has a vocab size of 1000.
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
@require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self) -> None:
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self) -> None:
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self) -> None:
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self) -> None:
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self) -> None:
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self) -> None:
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self) -> None:
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self) -> None:
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 106 | 0 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    '''simple docstring'''

    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        requires_backends(self, ["keras_nlp"])
| 719 |
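The class above is a backend guard: importing it always succeeds, but instantiating it calls requires_backends, which raises an ImportError when the keras_nlp package is missing. A minimal usage sketch, assuming keras_nlp is not installed so the dummy definition above is the one in scope:

# Sketch only: exercises the DummyObject guard defined above.
try:
    TFGPT2Tokenizer()  # __init__ runs requires_backends(self, ["keras_nlp"])
except ImportError as err:
    print(err)  # the message tells the user to install keras_nlp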
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
| 198 | 0 |
"""simple docstring"""
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher

    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 695 |
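A usage sketch for the bisection search above; with a target strictly inside (lower, higher), get_avg halves the interval until a midpoint lands on the target:

# Sketch: guessing 17 between 1 and 100 takes six midpoints.
guess_the_number(1, 100, 17)
# started...
# guess the number : 17
# details : [50, 25, 13, 19, 16, 17]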
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self, vocab_size=50_257, n_positions=1_024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1E-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 695 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 676 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 676 | 1 |
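A small cross-check sketch for the three implementations above; they count ordered compositions, so for target 5 over [1, 2, 5] the answer is 9 (1+2+2, 2+1+2 and 2+2+1 are all counted separately):

assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9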
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue, prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev), )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}")
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 181 |
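A usage sketch for the three rod-cutting solvers above, using the classic CLRS price table, where the optimum for a rod of length 8 is 22 (cut into lengths 2 and 6):

prices = [1, 5, 8, 9, 10, 17, 17, 20]  # price of rod lengths 1..8
assert naive_cut_rod_recursive(8, prices) == 22
assert top_down_cut_rod(8, prices) == 22
assert bottom_up_cut_rod(8, prices) == 22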
from __future__ import annotations

from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 2_99_79_24_58

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_97_92_45)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 181 | 1 |
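A numeric sketch of the boost above: at half the speed of light the Lorentz factor is 1/sqrt(1 - 0.25), roughly 1.1547, and transform mixes the ct and x components of a concrete event:

import numpy as np

v = 0.5 * 2_99_79_24_58  # half the speed of light
print(round(gamma(v), 4))  # ~1.1547
event = np.array([1.0, 1.0, 0.0, 0.0])  # t = 1 s, x = 1 m, y = z = 0
print(transform(v, event))  # note: transform scales event[0] by c in place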
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
| 44 |
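A usage sketch for the backtracking colorer above: a 4-cycle is 2-colorable, and color returns one valid assignment (or an empty list when max_colors is too small):

# Adjacency matrix of the 4-cycle 0-1-2-3-0.
graph = [
    [0, 1, 0, 1],
    [1, 0, 1, 0],
    [0, 1, 0, 1],
    [1, 0, 1, 0],
]
print(color(graph, 2))  # [0, 1, 0, 1]
print(color(graph, 1))  # [] -- a 4-cycle cannot be 1-colored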
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file() -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval") -> dict:
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
@slow
    def test_run_clm(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
@slow
    def test_run_summarization(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
@slow
    def test_run_mlm(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
@slow
    def test_run_t5_mlm(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
@slow
    def test_run_ner(self) -> None:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
@slow
    def test_run_qa(self) -> None:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 44 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase__ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowercase__ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowercase__ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"], )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 508 |
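A usage sketch for the metric wrapper above, mirroring the first docstring example; loading by name assumes the file is registered as "pearsonr" with the datasets library:

from datasets import load_metric

pearsonr_metric = load_metric("pearsonr")
results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
print(round(results["pearsonr"], 2))  # -0.74, as in the docstring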
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase: str = 'Muhammad Umer Farooq'
lowerCAmelCase: List[str] = 'MIT'
lowerCAmelCase: Tuple = '1.0.0'
lowerCAmelCase: List[Any] = 'Muhammad Umer Farooq'
lowerCAmelCase: Optional[Any] = 'contact@muhammadumerfarooq.me'
lowerCAmelCase: Dict = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)


if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
| 526 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a : str = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 19 |
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(sequence: list[int | str], current_sequence: list[int | str], index: int, index_used: list[int], ) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
| 19 | 1 |
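A sanity-check sketch against the standard library: the backtracking above prints every ordering of distinct elements exactly once, so a length-4 sequence yields 4! = 24 lines, the same count itertools produces:

from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # same count the code above prints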
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'xlm'
    attribute_map = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size',  # For backward compatibility
    }

    def __init__( self, vocab_size=3_0145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1E-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ])
| 565 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_small_integration_test(self) -> None:
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 401 | 0 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 475 |
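A usage sketch for the spiral solver above: each pass checks the three non-square corners of the next ring for primality, and the side length is returned once the prime ratio along the diagonals drops below ratio. For a 50% threshold the loop stops at side length 11:

assert solution(0.5) == 11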
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 475 | 1 |
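A hedged usage sketch for the conversion entry point above; the checkpoint path and output directory are placeholders, not real files:

# Illustrative only -- replace the paths with a real fairseq checkpoint.
model = convert_fairseq_mbart_checkpoint_from_disk(
    "/path/to/model.pt", hf_config_path="facebook/mbart-large-cc25", finetuned=False
)
model.save_pretrained("./mbart-converted")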
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case =logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}.")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )

        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
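# A minimal usage sketch (illustrative, not part of the original script): converting a
# single checkpoint from Python instead of the CLI; the output path is an assumption.
#
#   from pathlib import Path
#   convert_weights_and_push(Path("./converted"), model_name="resnet50", push_to_hub=False)
#
# Note that `--push_to_hub` uses `type=bool`, so any non-empty CLI string (including
# "False") parses as True; calling the function directly avoids that argparse pitfall.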
| 133 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list of all possible combinations of words from `word_bank`
    that concatenate to `target`
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [[word, *way] for way in table[i]]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
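# A small worked example (illustrative): with a single-entry word bank the table
# grows one slot per matched word, e.g.
#
#   all_construct("aa", ["a"])  # -> [["a", "a"]]
#
# Each table[i] holds every decomposition of target[:i]; the run time is exponential
# in the worst case since all decompositions are materialized.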
| 273 | 0 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
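# A quick illustration (hypothetical file content): for a module containing
#
#   from .unet import UNet2DModel
#   from .scheduling_utils import SchedulerMixin
#
# `get_relative_imports` returns ["unet", "scheduling_utils"] in some order
# (duplicates are removed through a set).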
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    """
    Check if the current Python environment contains all the libraries that are imported in a file.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """
    Import a module on the cache directory for modules and extract a class from it.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class
    inheriting from `DiffusionPipeline` in the module.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """
    Download a module file from a local folder or a distant repo, cache it inside the dynamic modules
    directory and return its path.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """
    Extracts a class from a module file, present in the local folder or repository of a model.
    """
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
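# A minimal usage sketch (illustrative): loading a community pipeline class by name.
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion", "clip_guided_stable_diffusion.py"
#   )
#
# Leaving `class_name=None` makes `find_pipeline_class` pick the unique class in the
# downloaded module that subclasses `DiffusionPipeline`.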
| 62 |
def solution(num: int = 1_000_000) -> int:
    """
    Returns the starting number below `num` that produces the longest Collatz chain,
    memoizing chain lengths as they are discovered (Project Euler problem 14).
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, num):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
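# A worked example (illustrative): the chain starting at 13 is
# 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, which has 10 terms,
# so once computed, counters[13] == 10. The memo makes each later chain stop
# as soon as it hits any previously seen number.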
| 62 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self) -> None:
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
"""input_ids""": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"""token_type_ids""": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
            # fmt: on

            expected_decoded_sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
| 588 |
'''simple docstring'''
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit from the voltage and
    current phasors (magnitudes plus phase angles in degrees).
    """
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
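# A worked example (illustrative): a 100 V phasor at +30 degrees and a 5 A phasor
# at -30 degrees multiply to 500 at angle 0 degrees, so
#
#   apparent_power(100, 5, 30, -30)  # -> (500+0j), up to floating-point rounding
#
# since phasor magnitudes multiply and phase angles add.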
| 588 | 1 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" MaskFormer. Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a [`MaskFormerConfig`] from a backbone and a decoder configuration."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
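# A minimal usage sketch (assumes `transformers` with Swin and DETR available):
# instantiating with defaults falls back to a Swin backbone and a DETR decoder.
#
#   config = MaskFormerConfig()
#   print(config.backbone_config.model_type)  # "swin"
#   print(config.decoder_config.model_type)   # "detr"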
| 708 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 463 | 0 |
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """
    Three-way partition of `data` around `pivot`: returns the elements smaller than,
    equal to, and greater than the pivot.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """
    Returns the index-th smallest element of `items` in expected linear time,
    or None if the index is out of range.
    """
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
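# A minimal usage sketch (illustrative):
if __name__ == "__main__":
    data = [2, 4, 5, 7, 899, 54, 32]
    # index len(data) // 2 selects the median of an odd-length list
    print(quick_select(data, len(data) // 2))  # 7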
| 330 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 609 | 0 |
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 129 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers: a convolution, a
    batch norm and a ReLU, applied in that order.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM): pools the top feature map at several scales and
    upsamples each pooled map back to the input resolution.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing head (UPerHead), based on a feature pyramid network (FPN)
    plus a pyramid pooling module (PSP).
    """

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """
    Fully convolutional auxiliary head: a small stack of conv blocks applied to one
    backbone feature map, used to compute the auxiliary loss.
    """

    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
__lowerCAmelCase = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__lowerCAmelCase = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
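# A minimal inference sketch (illustrative; the checkpoint below is the one listed in
# UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST):
#
#   from PIL import Image
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=Image.open("scene.jpg"), return_tensors="pt")
#   logits = model(**inputs).logits  # shape (batch_size, num_labels, height, width)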
| 129 | 1 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """
    Gaussian Error Linear Unit. Original implementation of the gelu activation function in the Google BERT repo.
    For information: OpenAI GPT's gelu is slightly different (and gives slightly different results).
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """
    Gaussian Error Linear Unit. A smoother version of the GELU, using the tanh approximation.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    Clip the range of possible GeLU outputs to [-10, 10]. This is especially useful for quantization purposes.
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit: splits the input along `axis` and gates one half with the sigmoid of the other.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
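# A minimal usage sketch (assumes TensorFlow is installed):
if __name__ == "__main__":
    act = get_tf_activation("gelu_new")
    print(act(tf.constant([-1.0, 0.0, 1.0])))  # smooth GELU values around zero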
| 243 |
def excel_title_to_column(column_title: str) -> int:
    """
    Converts an Excel-style column title (e.g. "A", "AB", "ZY") to its column number,
    treating the title as a base-26 numeral with digits A=1 ... Z=26.
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
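# Worked examples (illustrative): "AB" -> 1 * 26 + 2 = 28 and "ZY" -> 26 * 26 + 25 = 701,
# e.g. excel_title_to_column("AB") == 28.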
| 243 | 1 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor that extracts log-mel filter bank features from raw speech.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """
        Get mel filter bank features using TorchAudio. Note that TorchAudio expects 16-bit signed integer scaling,
        hence the waveform should not be normalized before feature extraction.
        """
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Main method to featurize and prepare for the model one or several sequence(s) of raw speech.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
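# A minimal usage sketch (illustrative; requires `torchaudio` for the Kaldi fbank op):
# featurizing one second of a 440 Hz tone sampled at 16 kHz.
if __name__ == "__main__":
    t = np.linspace(0, 1, 16000, endpoint=False)
    audio = 0.1 * np.sin(2 * np.pi * 440 * t)
    extractor = Speech2TextFeatureExtractor()
    batch = extractor(audio, sampling_rate=16000, return_tensors="np")
    print(batch["input_features"].shape)  # (1, num_frames, 80) log-mel features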
| 707 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
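# A minimal wiring sketch (illustrative; assumes a LightningModule `model` with
# `hparams.output_dir` set):
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(model.hparams.output_dir, "rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ]
#   )
#   trainer.fit(model)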
| 326 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
_a = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
_a = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
_a = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
_a = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
_a = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
_a = []
_a = []
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
for attribute in key.split('''.''' ):
_UpperCamelCase = getattr(__snake_case, __snake_case )
if weight_type is not None:
_UpperCamelCase = getattr(__snake_case, __snake_case ).shape
else:
_UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
_UpperCamelCase = value
elif weight_type == "weight_g":
_UpperCamelCase = value
elif weight_type == "weight_v":
_UpperCamelCase = value
elif weight_type == "bias":
_UpperCamelCase = value
elif weight_type == "running_mean":
_UpperCamelCase = value
elif weight_type == "running_var":
_UpperCamelCase = value
elif weight_type == "num_batches_tracked":
_UpperCamelCase = value
elif weight_type == "weight_ih_l0":
_UpperCamelCase = value
elif weight_type == "weight_hh_l0":
_UpperCamelCase = value
elif weight_type == "bias_ih_l0":
_UpperCamelCase = value
elif weight_type == "bias_hh_l0":
_UpperCamelCase = value
elif weight_type == "weight_ih_l1":
_UpperCamelCase = value
elif weight_type == "weight_hh_l1":
_UpperCamelCase = value
elif weight_type == "bias_ih_l1":
_UpperCamelCase = value
elif weight_type == "bias_hh_l1":
_UpperCamelCase = value
else:
_UpperCamelCase = value
logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_UpperCamelCase , _UpperCamelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = []
if model_name == "encodec_24khz" or "encodec_32khz":
_UpperCamelCase = MAPPING_24K
elif model_name == "encodec_48khz":
_UpperCamelCase = MAPPING_48K
else:
raise ValueError(F'''Unsupported model: {model_name}''' )
for name, value in orig_dict.items():
if should_ignore(__snake_case, __snake_case ):
logger.info(F'''{name} was ignored''' )
continue
_UpperCamelCase = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_UpperCamelCase , _UpperCamelCase = key.split('''.*.''' )
if prefix in name and suffix in name:
_UpperCamelCase = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
_UpperCamelCase = True
if "*" in mapped_key:
_UpperCamelCase = name.split(__snake_case )[0].split('''.''' )[-2]
_UpperCamelCase = mapped_key.replace('''*''', __snake_case )
if "weight_g" in name:
_UpperCamelCase = '''weight_g'''
elif "weight_v" in name:
_UpperCamelCase = '''weight_v'''
elif "weight_ih_l0" in name:
_UpperCamelCase = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
_UpperCamelCase = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
_UpperCamelCase = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
_UpperCamelCase = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
_UpperCamelCase = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
_UpperCamelCase = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
_UpperCamelCase = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
_UpperCamelCase = '''bias_hh_l1'''
elif "bias" in name:
_UpperCamelCase = '''bias'''
elif "weight" in name:
_UpperCamelCase = '''weight'''
elif "running_mean" in name:
_UpperCamelCase = '''running_mean'''
elif "running_var" in name:
_UpperCamelCase = '''running_var'''
elif "num_batches_tracked" in name:
_UpperCamelCase = '''num_batches_tracked'''
else:
_UpperCamelCase = None
set_recursively(__snake_case, __snake_case, __snake_case, __snake_case, __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'''Unused weights: {unused_weights}''' )
@torch.no_grad()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, ) -> Dict:
"""simple docstring"""
if config_path is not None:
_UpperCamelCase = EncodecConfig.from_pretrained(__snake_case )
else:
_UpperCamelCase = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_UpperCamelCase = [8, 5, 4, 4]
_UpperCamelCase = [2.2]
_UpperCamelCase = 64
_UpperCamelCase = 3_20_00
_UpperCamelCase = 20_48
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
elif model_name == "encodec_48khz":
_UpperCamelCase = [8, 5, 4, 2]
_UpperCamelCase = [3.0, 6.0, 12.0, 24.0]
_UpperCamelCase = 4_80_00
_UpperCamelCase = 2
_UpperCamelCase = False
_UpperCamelCase = '''time_group_norm'''
_UpperCamelCase = True
_UpperCamelCase = 1.0
_UpperCamelCase = 0.01
else:
raise ValueError(F'''Unknown model name: {model_name}''' )
_UpperCamelCase = EncodecModel(__snake_case )
_UpperCamelCase = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(__snake_case )
_UpperCamelCase = torch.load(__snake_case )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_UpperCamelCase = original_checkpoint['''best_state''']
recursively_load_weights(__snake_case, __snake_case, __snake_case )
model.save_pretrained(__snake_case )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(__snake_case )
model.push_to_hub(__snake_case )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_a = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
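# Example invocation (paths are placeholders, not taken from the original
# script; the flag names match the argparse definitions above):
#
#   python convert_encodec_checkpoint.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf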
| 19 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __a ):
snake_case : Dict = (UnCLIPScheduler,)
def snake_case_ (self , **lowerCAmelCase__ ):
_UpperCAmelCase : List[str] = {
"""num_train_timesteps""": 1_0_0_0,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**lowerCAmelCase__ )
return config
def snake_case_ (self ):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def snake_case_ (self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowerCAmelCase__ )
def snake_case_ (self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase__ )
def snake_case_ (self ):
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=lowerCAmelCase__ )
def snake_case_ (self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def snake_case_ (self ):
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : Any = self.get_scheduler_config(variance_type="""fixed_small_log""" )
_UpperCAmelCase : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_5_4_9_6_2_5 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.9_9_9_4_9_8_7 ) ) < 1e-5
def snake_case_ (self ):
_UpperCAmelCase : Dict = self.scheduler_classes[0]
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config(variance_type="""learned_range""" )
_UpperCAmelCase : str = scheduler_class(**lowerCAmelCase__ )
_UpperCAmelCase : int = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowerCAmelCase__ ) - -1_0.1_7_1_2_7_9_0 < 1e-5
assert scheduler._get_variance(4_8_7 , predicted_variance=lowerCAmelCase__ ) - -5.7_9_9_8_0_5_2 < 1e-5
assert scheduler._get_variance(9_9_9 , predicted_variance=lowerCAmelCase__ ) - -0.0_0_1_0_0_1_1 < 1e-5
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.scheduler_classes[0]
_UpperCAmelCase : str = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
_UpperCAmelCase : Union[str, Any] = scheduler.timesteps
_UpperCAmelCase : Optional[int] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter
_UpperCAmelCase : Dict = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase__ ):
# 1. predict noise residual
_UpperCAmelCase : str = model(lowerCAmelCase__ , lowerCAmelCase__ )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : List[str] = pred_prev_sample
_UpperCAmelCase : List[str] = torch.sum(torch.abs(lowerCAmelCase__ ) )
_UpperCAmelCase : Dict = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1e-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1e-3
def snake_case_ (self ):
_UpperCAmelCase : int = self.scheduler_classes[0]
_UpperCAmelCase : Tuple = self.get_scheduler_config()
_UpperCAmelCase : List[Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(2_5 )
_UpperCAmelCase : Optional[int] = scheduler.timesteps
_UpperCAmelCase : Optional[Any] = self.dummy_model()
_UpperCAmelCase : Dict = self.dummy_sample_deter
_UpperCAmelCase : List[str] = torch.manual_seed(0 )
for i, t in enumerate(lowerCAmelCase__ ):
# 1. predict noise residual
_UpperCAmelCase : Optional[Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
if i + 1 == timesteps.shape[0]:
_UpperCAmelCase : int = None
else:
_UpperCAmelCase : Any = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase : List[str] = scheduler.step(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , prev_timestep=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
_UpperCAmelCase : List[Any] = pred_prev_sample
_UpperCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) )
_UpperCAmelCase : List[str] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1e-3
def snake_case_ (self ):
pass
def snake_case_ (self ):
pass
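# A hedged standalone version of the fixed_small_log variance check above
# (the constant is copied from this test; _get_variance is private API, so
# this mirrors the test rather than documenting a public contract):
#
#   scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
#   assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5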
| 414 | 0 |
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer.

    >>> roman_to_int("MMXXI")
    2021
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert a positive integer to a Roman numeral string.

    >>> int_to_roman(2021)
    'MMXXI'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 611 |
def binary_xor(a: int, b: int) -> str:
    """Return the XOR of two non-negative integers as a binary string.

    >>> binary_xor(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
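# Cross-check against Python's built-in ^ operator (demo only). binary_xor
# keeps leading zeros up to the width of the wider operand, while bin()
# drops them, so the strings only coincide when the top bit is set.
def _xor_demo() -> None:
    assert binary_xor(25, 32) == "0b111001" == bin(25 ^ 32)
    assert binary_xor(5, 4) == "0b001"  # bin(5 ^ 4) gives "0b1"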
if __name__ == "__main__":
import doctest
doctest.testmod()
| 611 | 1 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =RoCBertTokenizer
a : List[Any] =None
a : Tuple =False
a : str =True
a : Optional[Any] =filter_non_english
def _a ( self ):
super().setUp()
UpperCamelCase_: Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
UpperCamelCase_: str = {}
UpperCamelCase_: Dict = {}
for i, value in enumerate(_lowercase ):
UpperCamelCase_: Union[str, Any] = i
UpperCamelCase_: Tuple = i
UpperCamelCase_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
UpperCamelCase_: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(_lowercase , _lowercase , ensure_ascii=_lowercase )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(_lowercase , _lowercase , ensure_ascii=_lowercase )
def _a ( self ):
UpperCamelCase_: Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCamelCase_: int = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(_lowercase , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_lowercase ) , [5, 6, 2, 5, 7, 8] )
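    # RoCBert represents each token along three parallel channels (token ids,
    # glyph-shape ids, and pronunciation ids), which is why three id sequences
    # are asserted above. In this tiny test vocab the three mappings happen to
    # coincide; real checkpoints resolve them through separate shape and
    # pronunciation vocab files.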
def _a ( self ):
UpperCamelCase_: Tuple = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ):
UpperCamelCase_: str = RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ):
UpperCamelCase_: Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ):
UpperCamelCase_: Tuple = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ):
UpperCamelCase_: Dict = RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ):
UpperCamelCase_: str = RoCBertBasicTokenizer(do_lower_case=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ):
UpperCamelCase_: str = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ):
UpperCamelCase_: Tuple = RoCBertBasicTokenizer(do_lower_case=_lowercase , strip_accents=_lowercase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ):
UpperCamelCase_: List[Any] = RoCBertBasicTokenizer(do_lower_case=_lowercase , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ):
UpperCamelCase_: Dict = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
UpperCamelCase_: Tuple = {}
for i, token in enumerate(_lowercase ):
UpperCamelCase_: List[str] = i
UpperCamelCase_: Dict = RoCBertWordpieceTokenizer(vocab=_lowercase , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ):
UpperCamelCase_: List[str] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
UpperCamelCase_: str = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: List[str] = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
UpperCamelCase_: Tuple = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
UpperCamelCase_: List[str] = tokenizer_r.encode_plus(
_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase , )
UpperCamelCase_: Optional[int] = tokenizer_r.do_lower_case if hasattr(_lowercase , 'do_lower_case' ) else False
UpperCamelCase_: int = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ):
UpperCamelCase_: List[str] = ['的', '人', '有']
UpperCamelCase_: Optional[Any] = ''.join(_lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: Tuple = True
UpperCamelCase_: Union[str, Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
UpperCamelCase_: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
UpperCamelCase_: Optional[int] = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
UpperCamelCase_: List[str] = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
UpperCamelCase_: str = tokenizer_r.convert_ids_to_tokens(_lowercase )
UpperCamelCase_: str = tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
UpperCamelCase_: Optional[Any] = False
UpperCamelCase_: Any = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
UpperCamelCase_: Optional[Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
UpperCamelCase_: Any = tokenizer_r.encode(_lowercase , add_special_tokens=_lowercase )
UpperCamelCase_: int = tokenizer_p.encode(_lowercase , add_special_tokens=_lowercase )
UpperCamelCase_: int = tokenizer_r.convert_ids_to_tokens(_lowercase )
UpperCamelCase_: Optional[int] = tokenizer_p.convert_ids_to_tokens(_lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCamelCase_: Tuple = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_lowercase )
]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(_lowercase , _lowercase )
@slow
def _a ( self ):
UpperCamelCase_: List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
UpperCamelCase_: List[str] = tokenizer.encode('你好' , add_special_tokens=_lowercase )
UpperCamelCase_: Any = tokenizer.encode('你是谁' , add_special_tokens=_lowercase )
UpperCamelCase_: int = tokenizer.build_inputs_with_special_tokens(_lowercase )
UpperCamelCase_: Dict = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ):
UpperCamelCase_: Dict = self.get_tokenizers(do_lower_case=_lowercase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCamelCase_: Any = '你好,你是谁'
UpperCamelCase_: int = tokenizer.tokenize(_lowercase )
UpperCamelCase_: Dict = tokenizer.convert_tokens_to_ids(_lowercase )
UpperCamelCase_: str = tokenizer.convert_tokens_to_shape_ids(_lowercase )
UpperCamelCase_: Any = tokenizer.convert_tokens_to_pronunciation_ids(_lowercase )
UpperCamelCase_: List[Any] = tokenizer.prepare_for_model(
_lowercase , _lowercase , _lowercase , add_special_tokens=_lowercase )
UpperCamelCase_: Optional[int] = tokenizer.encode_plus(_lowercase , add_special_tokens=_lowercase )
                self.assertEqual(_lowercase , _lowercase )
| 57 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def UpperCAmelCase_ ( *_lowercase , **_lowercase )-> Optional[int]:
pass
@is_pipeline_test
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase ):
UpperCamelCase_ :Optional[Any] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCAmelCase_ ( self , _lowercase , _lowercase , _lowercase )-> Union[str, Any]:
UpperCamelCase_ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCamelCase_ = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def UpperCAmelCase_ ( self , _lowercase , _lowercase )-> Union[str, Any]:
UpperCamelCase_ = object_detector(examples[0] , threshold=0.0 )
UpperCamelCase_ = len(_lowercase )
self.assertGreater(_lowercase , 0 )
self.assertEqual(
_lowercase , [
{
"score": ANY(_lowercase ),
"label": ANY(_lowercase ),
"box": {"xmin": ANY(_lowercase ), "ymin": ANY(_lowercase ), "xmax": ANY(_lowercase ), "ymax": ANY(_lowercase )},
}
for i in range(_lowercase )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def UpperCAmelCase_ ( self )-> str:
pass
@require_torch
def UpperCAmelCase_ ( self )-> List[Any]:
UpperCamelCase_ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
UpperCamelCase_ = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
UpperCamelCase_ = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
def UpperCAmelCase_ ( self )-> Any:
UpperCamelCase_ = pipeline("zero-shot-object-detection" )
UpperCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
UpperCamelCase_ = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def UpperCAmelCase_ ( self )-> Optional[int]:
pass
@require_torch
@slow
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = 0.2
UpperCamelCase_ = pipeline("zero-shot-object-detection" )
UpperCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = 2
UpperCamelCase_ = pipeline("zero-shot-object-detection" )
UpperCamelCase_ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
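# Minimal usage sketch of the pipeline exercised above (scores and the
# default checkpoint depend on the installed transformers version):
#
#   detector = pipeline("zero-shot-object-detection")
#   preds = detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote", "couch"],
#       threshold=0.2,
#   )
#   # each prediction: {"score": float, "label": str, "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}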
| 628 | 0 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_UpperCamelCase: Dict ={
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_UpperCamelCase: List[str] =logging.get_logger(__name__)
class __lowercase( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ = '''maskformer'''
UpperCamelCase_ = {'''hidden_size''': '''mask_feature_size'''}
UpperCamelCase_ = ['''resnet''', '''swin''']
UpperCamelCase_ = ['''detr''']
def __init__( self : Optional[Any] , _lowerCAmelCase : int = 256 , _lowerCAmelCase : int = 256 , _lowerCAmelCase : float = 0.1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[Dict] = None , _lowerCAmelCase : Optional[Dict] = None , _lowerCAmelCase : float = 0.02 , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : float = 20.0 , _lowerCAmelCase : Optional[bool] = None , **_lowerCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
_lowerCAmelCase = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = backbone_config.pop('model_type' )
_lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase = config_class.from_dict(_lowerCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
F'''Supported model types: {",".join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
_lowerCAmelCase = DetrConfig()
else:
# verify that the decoder is supported
_lowerCAmelCase = (
decoder_config.pop('model_type' ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F'''Transformer Decoder {decoder_type} not supported, please use one of'''
F''' {",".join(self.decoders_supported )}''' )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = CONFIG_MAPPING[decoder_type]
_lowerCAmelCase = config_class.from_dict(_lowerCAmelCase )
_lowerCAmelCase = backbone_config
_lowerCAmelCase = decoder_config
# main feature dimension for the model
_lowerCAmelCase = fpn_feature_size
_lowerCAmelCase = mask_feature_size
# initializer
_lowerCAmelCase = init_std
_lowerCAmelCase = init_xavier_std
# Hungarian matcher && loss
_lowerCAmelCase = cross_entropy_weight
_lowerCAmelCase = dice_weight
_lowerCAmelCase = mask_weight
_lowerCAmelCase = use_auxiliary_loss
_lowerCAmelCase = no_object_weight
_lowerCAmelCase = output_auxiliary_logits
_lowerCAmelCase = self.decoder_config.encoder_attention_heads
_lowerCAmelCase = self.decoder_config.num_hidden_layers
super().__init__(**_lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : str , _lowerCAmelCase : PretrainedConfig , _lowerCAmelCase : PretrainedConfig , **_lowerCAmelCase : Optional[int] ) -> str:
return cls(
backbone_config=_lowerCAmelCase , decoder_config=_lowerCAmelCase , **_lowerCAmelCase , )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Dict[str, any]:
_lowerCAmelCase = copy.deepcopy(self.__dict__ )
_lowerCAmelCase = self.backbone_config.to_dict()
_lowerCAmelCase = self.decoder_config.to_dict()
_lowerCAmelCase = self.__class__.model_type
return output
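# Hedged construction sketch (the class name is mangled in this dump; in
# upstream transformers it is MaskFormerConfig, and the classmethod above
# corresponds to MaskFormerConfig.from_backbone_and_decoder_configs):
#
#   config = MaskFormerConfig()  # Swin backbone + DETR decoder defaults
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(
#       backbone_config=SwinConfig(), decoder_config=DetrConfig()
#   )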
| 720 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a greyscale PIL image in place: pixels strictly above the
    mean intensity become 255, all others become 0."""
    width, height = image.size
    mean = 0
    pixels = image.load()

    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
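# Quick demo on a tiny synthetic greyscale image (demo only): everything
# brighter than the mean collapses to white, the rest to black.
def _threshold_demo() -> None:
    img = Image.new("L", (4, 1))
    img.putdata([10, 20, 200, 250])  # integer mean is 120
    assert list(mean_threshold(img).getdata()) == [0, 0, 255, 255]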
if __name__ == "__main__":
_UpperCamelCase: List[Any] =mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
| 585 | 0 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve for the one unknown among inductance, frequency and inductive
    reactance; exactly one argument must be passed as 0 to mark the unknown."""
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 25 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of n in ascending order.

    >>> prime_factors(360)
    [2, 2, 2, 3, 3, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
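# Quick property check (demo only): the factors multiply back to the input.
def _factor_demo() -> None:
    from math import prod

    assert prod(prime_factors(360)) == 360
    assert prime_factors(97) == [97]  # primes factor as themselves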
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 25 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
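# _LazyModule defers the heavy torch import until an attribute is actually
# touched: accessing e.g. transformers.models.swinv2.Swinv2Model resolves the
# modeling submodule on first use instead of at package import time.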
| 719 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=1 )-> Tuple:
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=0 )-> Dict:
UpperCamelCase = []
for old_item in old_list:
UpperCamelCase = old_item.replace("""in_layers.0""" , """norm1""" )
UpperCamelCase = new_item.replace("""in_layers.2""" , """conv1""" )
UpperCamelCase = new_item.replace("""out_layers.0""" , """norm2""" )
UpperCamelCase = new_item.replace("""out_layers.3""" , """conv2""" )
UpperCamelCase = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
UpperCamelCase = new_item.replace("""skip_connection""" , """conv_shortcut""" )
UpperCamelCase = shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=0 )-> List[str]:
UpperCamelCase = []
for old_item in old_list:
UpperCamelCase = old_item
UpperCamelCase = new_item.replace("""norm.weight""" , """group_norm.weight""" )
UpperCamelCase = new_item.replace("""norm.bias""" , """group_norm.bias""" )
UpperCamelCase = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
UpperCamelCase = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
UpperCamelCase = shave_segments(__UpperCamelCase , n_shave_prefix_segments=__UpperCamelCase )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None )-> str:
assert isinstance(__UpperCamelCase , __UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCamelCase = old_checkpoint[path]
UpperCamelCase = old_tensor.shape[0] // 3
UpperCamelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCamelCase = old_tensor.shape[0] // config["""num_head_channels"""] // 3
UpperCamelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCamelCase ,UpperCamelCase ,UpperCamelCase = old_tensor.split(channels // num_heads , dim=1 )
UpperCamelCase = query.reshape(__UpperCamelCase )
UpperCamelCase = key.reshape(__UpperCamelCase )
UpperCamelCase = value.reshape(__UpperCamelCase )
for path in paths:
UpperCamelCase = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCamelCase = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
UpperCamelCase = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
UpperCamelCase = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCamelCase = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCamelCase = old_checkpoint[path["""old"""]][:, :, 0]
else:
UpperCamelCase = old_checkpoint[path["""old"""]]
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
UpperCamelCase = {}
UpperCamelCase = checkpoint["""time_embed.0.weight"""]
UpperCamelCase = checkpoint["""time_embed.0.bias"""]
UpperCamelCase = checkpoint["""time_embed.2.weight"""]
UpperCamelCase = checkpoint["""time_embed.2.bias"""]
UpperCamelCase = checkpoint["""input_blocks.0.0.weight"""]
UpperCamelCase = checkpoint["""input_blocks.0.0.bias"""]
UpperCamelCase = checkpoint["""out.0.weight"""]
UpperCamelCase = checkpoint["""out.0.bias"""]
UpperCamelCase = checkpoint["""out.2.weight"""]
UpperCamelCase = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
UpperCamelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
UpperCamelCase = {
layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the middle blocks only
UpperCamelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
UpperCamelCase = {
layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
# Retrieves the keys for the output blocks only
UpperCamelCase = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
UpperCamelCase = {
layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key]
for layer_id in range(__UpperCamelCase )
}
for i in range(1 , __UpperCamelCase ):
UpperCamelCase = (i - 1) // (config["""num_res_blocks"""] + 1)
UpperCamelCase = (i - 1) % (config["""num_res_blocks"""] + 1)
UpperCamelCase = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key]
UpperCamelCase = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key]
if F"input_blocks.{i}.0.op.weight" in checkpoint:
UpperCamelCase = checkpoint[
F"input_blocks.{i}.0.op.weight"
]
UpperCamelCase = checkpoint[
F"input_blocks.{i}.0.op.bias"
]
continue
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
UpperCamelCase = {"""old""": F"input_blocks.{i}.0", """new""": F"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
UpperCamelCase = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=__UpperCamelCase )
if len(__UpperCamelCase ):
UpperCamelCase = renew_attention_paths(__UpperCamelCase )
UpperCamelCase = {
"""old""": F"input_blocks.{i}.1",
"""new""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}",
}
UpperCamelCase = {
F"input_blocks.{i}.1.qkv.bias": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"input_blocks.{i}.1.qkv.weight": {
"""key""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase , )
UpperCamelCase = middle_blocks[0]
UpperCamelCase = middle_blocks[1]
UpperCamelCase = middle_blocks[2]
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , config=__UpperCamelCase )
UpperCamelCase = renew_attention_paths(__UpperCamelCase )
UpperCamelCase = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , attention_paths_to_split=__UpperCamelCase , config=__UpperCamelCase )
for i in range(__UpperCamelCase ):
UpperCamelCase = i // (config["""num_res_blocks"""] + 1)
UpperCamelCase = i % (config["""num_res_blocks"""] + 1)
UpperCamelCase = [shave_segments(__UpperCamelCase , 2 ) for name in output_blocks[i]]
UpperCamelCase = {}
for layer in output_block_layers:
UpperCamelCase ,UpperCamelCase = layer.split(""".""" )[0], shave_segments(__UpperCamelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(__UpperCamelCase )
else:
UpperCamelCase = [layer_name]
if len(__UpperCamelCase ) > 1:
UpperCamelCase = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key]
UpperCamelCase = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key]
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
UpperCamelCase = renew_resnet_paths(__UpperCamelCase )
UpperCamelCase = {"""old""": F"output_blocks.{i}.0", """new""": F"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
assign_to_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , config=__UpperCamelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCamelCase = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
UpperCamelCase = checkpoint[
F"output_blocks.{i}.{index}.conv.weight"
]
UpperCamelCase = checkpoint[
F"output_blocks.{i}.{index}.conv.bias"
]
# Clear attentions as they have been attributed above.
if len(__UpperCamelCase ) == 2:
UpperCamelCase = []
if len(__UpperCamelCase ):
UpperCamelCase = renew_attention_paths(__UpperCamelCase )
UpperCamelCase = {
"""old""": F"output_blocks.{i}.1",
"""new""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}",
}
UpperCamelCase = {
F"output_blocks.{i}.1.qkv.bias": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
},
F"output_blocks.{i}.1.qkv.weight": {
"""key""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
"""query""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
"""value""": F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
},
}
assign_to_checkpoint(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=__UpperCamelCase , )
else:
UpperCamelCase = renew_resnet_paths(__UpperCamelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCamelCase = """.""".join(["""output_blocks""", str(__UpperCamelCase ), path["""old"""]] )
UpperCamelCase = """.""".join(["""up_blocks""", str(__UpperCamelCase ), """resnets""", str(__UpperCamelCase ), path["""new"""]] )
UpperCamelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
SCREAMING_SNAKE_CASE__ = json.loads(f.read())
SCREAMING_SNAKE_CASE__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
SCREAMING_SNAKE_CASE__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
SCREAMING_SNAKE_CASE__ = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
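# Example invocation (paths and the script filename are placeholders; the
# flag names match the argparse definitions above):
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./ldm_diffusers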
| 35 | 0 |
'''simple docstring'''

import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right, prepending the decoder start token."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
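# Tiny trace of shift_tokens_right (demo only): the decoder start token is
# prepended and every other token moves one slot to the right.
#
#   >>> shift_tokens_right(jnp.array([[5, 7, 9]]), pad_token_id=0, decoder_start_token_id=1)
#   Array([[1, 5, 7]], dtype=int32)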
| 508 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = IFPipeline
lowercase__ = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ = PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowercase_ ( self : List[Any] ):
return self._get_dummy_components()
def lowercase_ ( self : int , __snake_case : List[str] , __snake_case : List[Any]=0 ):
if str(__snake_case ).startswith('mps' ):
a : List[Any] = torch.manual_seed(__snake_case )
else:
a : str = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a : Dict = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self : Optional[Any] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowercase_ ( self : Union[str, Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase_ ( self : Tuple ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase_ ( self : str ):
self._test_save_load_local()
def lowercase_ ( self : Any ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Tuple ):
# if
a : List[Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
a : Optional[int] = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__snake_case , tokenizer=__snake_case )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
a , a : Optional[Any] = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
a : List[Any] = None
a : Dict = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__snake_case , __snake_case , __snake_case , __snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
a : List[str] = IFImgaImgPipeline(**pipe_a.components )
a : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__snake_case , __snake_case , __snake_case , __snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
a : Tuple = IFInpaintingPipeline(**pipe_a.components )
a : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__snake_case , __snake_case , __snake_case , __snake_case )
def lowercase_ ( self : Optional[Any] , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : List[str] ):
# pipeline 1
_start_torch_memory_measurement()
a : int = torch.Generator(device='cpu' ).manual_seed(0 )
a : int = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , num_inference_steps=2 , generator=__snake_case , output_type='np' , )
a : Dict = output.images[0]
assert image.shape == (64, 64, 3)
a : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
a : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__snake_case , __snake_case )
# pipeline 2
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
a : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
a : Any = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , generator=__snake_case , num_inference_steps=2 , output_type='np' , )
a : Tuple = output.images[0]
assert image.shape == (2_56, 2_56, 3)
a : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : List[str] , __snake_case : str , __snake_case : int , __snake_case : Any , __snake_case : Any ):
# pipeline 1
_start_torch_memory_measurement()
a : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
a : Any = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , num_inference_steps=2 , generator=__snake_case , output_type='np' , )
a : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
a : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__snake_case , __snake_case )
# pipeline 2
_start_torch_memory_measurement()
a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
a : str = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__snake_case )
a : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
a : Dict = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , original_image=__snake_case , generator=__snake_case , num_inference_steps=2 , output_type='np' , )
a : Optional[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
a : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : List[Any] , __snake_case : Tuple , __snake_case : Any , __snake_case : str , __snake_case : int ):
# pipeline 1
_start_torch_memory_measurement()
a : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__snake_case )
a : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a : str = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , mask_image=__snake_case , num_inference_steps=2 , generator=__snake_case , output_type='np' , )
a : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
a : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__snake_case , __snake_case )
# pipeline 2
_start_torch_memory_measurement()
a : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
a : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__snake_case )
a : Optional[int] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(__snake_case )
a : int = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , mask_image=__snake_case , original_image=__snake_case , generator=__snake_case , num_inference_steps=2 , output_type='np' , )
a : List[str] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
a : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowerCamelCase__ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
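# Resetting the CUDA allocator's peak counters between stages means each
# torch.cuda.max_memory_allocated() assertion above measures one pipeline run
# in isolation rather than the running maximum of the whole test.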
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
lowercase__ : Tuple = """blenderbot-small"""
lowercase__ : List[Any] = ["""past_key_values"""]
lowercase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , UpperCamelCase__=5_02_65 , UpperCamelCase__=5_12 , UpperCamelCase__=8 , UpperCamelCase__=20_48 , UpperCamelCase__=16 , UpperCamelCase__=8 , UpperCamelCase__=20_48 , UpperCamelCase__=16 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="gelu" , UpperCamelCase__=5_12 , UpperCamelCase__=0.1 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1 , UpperCamelCase__=False , UpperCamelCase__=0 , UpperCamelCase__=1 , UpperCamelCase__=2 , UpperCamelCase__=2 , **UpperCamelCase__ , ):
'''simple docstring'''
A__ = vocab_size
A__ = max_position_embeddings
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = use_cache
A__ = encoder_layers
A__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
@property
def lowercase_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A__ = {0: "batch"}
A__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
A__ = {0: "batch", 1: "decoder_sequence"}
A__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
A__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
A__ , A__ = self.num_layers
for i in range(UpperCamelCase__ ):
A__ = {0: "batch", 2: "past_sequence + sequence"}
A__ = {0: "batch", 2: "past_sequence + sequence"}
else:
A__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowercase_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = super().outputs
else:
A__ = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A__ , A__ = self.num_layers
for i in range(UpperCamelCase__ ):
A__ = {0: "batch", 2: "past_sequence + sequence"}
A__ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A__ = seq_length if not self.use_past else 1
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A__ = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
A__ = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A__ , A__ = common_inputs["input_ids"].shape
A__ = common_inputs["decoder_input_ids"].shape[1]
A__ , A__ = self.num_attention_heads
A__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A__ = decoder_seq_length + 3
A__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A__ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A__ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A__ , A__ = self.num_layers
A__ = min(UpperCamelCase__ , UpperCamelCase__ )
A__ = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A__ = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A__ = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
A__ , A__ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
A__ = seqlen + 2
A__ , A__ = self.num_layers
A__ , A__ = self.num_attention_heads
A__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A__ = common_inputs["attention_mask"].dtype
A__ = torch.cat(
[common_inputs["attention_mask"], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A__ = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
A__ = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A__ = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A__ = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A__ = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
A__ = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A__ = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
A__ = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A__ = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__UpperCAmelCase ="""base_with_context"""
def __a ( A , A ) -> str:
'''simple docstring'''
A__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
A__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=A )
for lyr_num, lyr in enumerate(model.encoders ):
A__ = weights[f"""layers_{lyr_num}"""]
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
A__ = ly_weight["attention"]
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def __a ( A , A ) -> Dict:
'''simple docstring'''
A__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=A )
for lyr_num, lyr in enumerate(model.encoders ):
A__ = weights[f"""layers_{lyr_num}"""]
A__ = ly_weight["attention"]
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def __a ( A , A ) -> Union[str, Any]:
'''simple docstring'''
A__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=A )
A__ = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
A__ = weights[f"""layers_{lyr_num}"""]
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
A__ = ly_weight["self_attention"]
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = ly_weight["MultiHeadDotProductAttention_0"]
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def __a ( A ) -> str:
'''simple docstring'''
A__ = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
A__ = jnp.tree_util.tree_map(onp.array , A )
A__ = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
A__ = os.path.join(args.checkpoint_path , ".." , "config.gin" )
A__ = inference.parse_training_gin_file(A , A )
A__ = inference.InferenceModel(args.checkpoint_path , A )
A__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
A__ = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
A__ = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
A__ = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
A__ = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , A )
A__ = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , A )
A__ = load_decoder(ta_checkpoint["target"]["decoder"] , A )
A__ = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
A__ = SpectrogramDiffusionPipeline(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
__UpperCAmelCase =parser.parse_args()
main(args)
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def A ( snake_case__ : List[str] , snake_case__ : List[Any]=False ) -> int:
'''simple docstring'''
try:
__snake_case = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__snake_case = default
else:
# KEY is set, convert it to True or False.
try:
__snake_case = strtobool(lowerCamelCase_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no." )
return _value
UpperCAmelCase__ : Any = parse_flag_from_env("RUN_SLOW", default=False)
UpperCAmelCase__ : str = parse_flag_from_env("RUN_REMOTE", default=False)
UpperCAmelCase__ : Union[str, Any] = parse_flag_from_env("RUN_LOCAL", default=True)
UpperCAmelCase__ : List[str] = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
UpperCAmelCase__ : Any = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
UpperCAmelCase__ : Tuple = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
UpperCAmelCase__ : str = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
UpperCAmelCase__ : str = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ",
)
# Beam
UpperCAmelCase__ : Dict = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
UpperCAmelCase__ : Tuple = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
UpperCAmelCase__ : int = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def A ( snake_case__ : Optional[Any] ) -> str:
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__snake_case = unittest.skip('test requires faiss' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : Any ) -> List[Any]:
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__snake_case = unittest.skip('test requires regex' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : int ) -> Optional[int]:
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__snake_case = unittest.skip('test requires elasticsearch' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : Any ) -> Union[str, Any]:
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__snake_case = unittest.skip('test requires sqlalchemy' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : List[Any] ) -> str:
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__snake_case = unittest.skip('test requires PyTorch' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if not config.TF_AVAILABLE:
__snake_case = unittest.skip('test requires TensorFlow' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : List[str] ) -> List[Any]:
'''simple docstring'''
if not config.JAX_AVAILABLE:
__snake_case = unittest.skip('test requires JAX' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : Any ) -> Tuple:
'''simple docstring'''
if not config.PIL_AVAILABLE:
__snake_case = unittest.skip('test requires Pillow' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : Union[str, Any] ) -> Any:
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(lowerCamelCase_ )
else:
return test_case
def A ( snake_case__ : Dict ) -> List[str]:
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(lowerCamelCase_ )
else:
return test_case
def A ( snake_case__ : List[str] ) -> List[str]:
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(lowerCamelCase_ )
else:
return test_case
def A ( snake_case__ : str ) -> Dict:
'''simple docstring'''
def _require_spacy_model(snake_case__ : Dict ):
try:
import spacy # noqa F401
spacy.load(lowerCamelCase_ )
except ImportError:
return unittest.skip('test requires spacy' )(lowerCamelCase_ )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(lowerCamelCase_ ) )(lowerCamelCase_ )
else:
return test_case
return _require_spacy_model
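# Hypothetical usage of the factory above (upstream name: require_spacy_model):
#   @require_spacy_model("en_core_web_sm")
#   def test_pos_tagging(self): ...
# The test is skipped when spacy or the named model is not installed.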
def A ( snake_case__ : Dict ) -> Dict:
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(lowerCamelCase_ )
else:
return test_case
def A ( snake_case__ : Optional[Any] ) -> int:
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(lowerCamelCase_ )
else:
return test_case
def A ( snake_case__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__snake_case = unittest.skip('test is slow' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : Any ) -> Tuple:
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__snake_case = unittest.skip('test is local' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : Dict ) -> Dict:
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__snake_case = unittest.skip('test is packaged' )(lowerCamelCase_ )
return test_case
def A ( snake_case__ : Optional[int] ) -> Tuple:
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__snake_case = unittest.skip('test requires remote' )(lowerCamelCase_ )
return test_case
def A ( *snake_case__ : Union[str, Any] ) -> str:
'''simple docstring'''
def decorate(cls : int ):
for name, fn in cls.__dict__.items():
if callable(lowerCamelCase_ ) and name.startswith('test' ):
for decorator in decorators:
__snake_case = decorator(lowerCamelCase_ )
setattr(cls , lowerCamelCase_ , lowerCamelCase_ )
return cls
return decorate
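# Sketch of the class-decorator combinator above (its upstream name is
# obfuscated here, so the name below is assumed):
#   @for_all_test_methods(require_faiss, slow)
#   class IndexTests(unittest.TestCase):
#       def test_add_vectors(self): ...
# Every callable attribute whose name starts with "test" gets each decorator applied.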
class __lowercase ( SCREAMING_SNAKE_CASE__ ):
pass
class __lowercase ( SCREAMING_SNAKE_CASE__ ):
__UpperCAmelCase = 0
__UpperCAmelCase = 1
__UpperCAmelCase = 2
@contextmanager
def A ( snake_case__ : int=OfflineSimulationMode.CONNECTION_FAILS , snake_case__ : Dict=1e-16 ) -> Optional[Any]:
'''simple docstring'''
__snake_case = requests.Session().request
def timeout_request(snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : str , **snake_case__ : List[Any] ):
# Change the url to an invalid url so that the connection hangs
__snake_case = "https://10.255.255.1"
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." )
__snake_case = timeout
try:
return online_request(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__snake_case = url
__snake_case = e.args[0]
__snake_case = (max_retry_error.args[0].replace('10.255.255.1' , f"OfflineMock[{url}]" ),)
__snake_case = (max_retry_error,)
raise
def raise_connection_error(snake_case__ : Tuple , snake_case__ : str , **snake_case__ : int ):
raise requests.ConnectionError('Offline mode is enabled.' , request=lowerCamelCase_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , lowerCamelCase_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , lowerCamelCase_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , lowerCamelCase_ ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
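# Hedged usage -- the context manager above (upstream: datasets offline()) lets
# tests simulate a network outage:
#   with offline(OfflineSimulationMode.CONNECTION_FAILS):
#       ...  # any requests.Session.send call now raises ConnectionError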
@contextmanager
def A ( *snake_case__ : List[str] , **snake_case__ : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case = str(Path().resolve() )
with tempfile.TemporaryDirectory(*lowerCamelCase_ , **lowerCamelCase_ ) as tmp_dir:
try:
os.chdir(lowerCamelCase_ )
yield
finally:
os.chdir(lowerCamelCase_ )
@contextmanager
def A ( ) -> List[str]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def A ( ) -> List[Any]:
'''simple docstring'''
import gc
gc.collect()
__snake_case = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
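# Hedged usage of the two Arrow-memory context managers above (upstream names
# assumed: assert_arrow_memory_increases / assert_arrow_memory_doesnt_increase):
#   with assert_arrow_memory_doesnt_increase():
#       _ = dataset.with_format("numpy")  # should not retain new Arrow buffers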
def A ( snake_case__ : Tuple , snake_case__ : Dict ) -> Dict:
'''simple docstring'''
return deepcopy(lowerCamelCase_ ).integers(0 , 100 , 10 ).tolist() == deepcopy(lowerCamelCase_ ).integers(0 , 100 , 10 ).tolist()
def A ( snake_case__ : List[Any] ) -> List[Any]:
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(snake_case__ : Dict , *snake_case__ : Tuple , **snake_case__ : Any ):
try:
return func(*lowerCamelCase_ , **lowerCamelCase_ )
except HTTPError as err:
if str(lowerCamelCase_ ).startswith('500' ) or str(lowerCamelCase_ ).startswith('502' ):
pytest.xfail(str(lowerCamelCase_ ) )
raise err
return decorator.decorator(_wrapper , lowerCamelCase_ )
class __lowercase :
def __init__( self , lowercase_ , lowercase_ , lowercase_) -> Optional[int]:
__snake_case = returncode
__snake_case = stdout
__snake_case = stderr
async def A ( snake_case__ : str , snake_case__ : Optional[Any] ) -> int:
'''simple docstring'''
while True:
__snake_case = await stream.readline()
if line:
callback(lowerCamelCase_ )
else:
break
async def A ( snake_case__ : Any , snake_case__ : Dict=None , snake_case__ : Union[str, Any]=None , snake_case__ : int=None , snake_case__ : str=False , snake_case__ : Optional[Any]=False ) -> Dict:
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(lowerCamelCase_ ) )
__snake_case = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__snake_case = []
__snake_case = []
def tee(snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Union[str, Any]="" ):
__snake_case = line.decode('utf-8' ).rstrip()
sink.append(lowerCamelCase_ )
if not quiet:
print(lowerCamelCase_ , lowerCamelCase_ , file=lowerCamelCase_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda snake_case__ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda snake_case__ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stderr , label='stderr:' ) ),
] , timeout=lowerCamelCase_ , )
return _RunOutput(await p.wait() , lowerCamelCase_ , lowerCamelCase_ )
def A ( snake_case__ : Optional[int] , snake_case__ : Union[str, Any]=None , snake_case__ : Any=None , snake_case__ : List[str]=180 , snake_case__ : Dict=False , snake_case__ : Optional[Any]=True ) -> str:
'''simple docstring'''
__snake_case = asyncio.get_event_loop()
__snake_case = loop.run_until_complete(
_stream_subprocess(lowerCamelCase_ , env=lowerCamelCase_ , stdin=lowerCamelCase_ , timeout=lowerCamelCase_ , quiet=lowerCamelCase_ , echo=lowerCamelCase_ ) )
__snake_case = " ".join(lowerCamelCase_ )
if result.returncode > 0:
__snake_case = "\n".join(result.stderr )
raise RuntimeError(
f"\'{cmd_str}\' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"\'{cmd_str}\' produced no output." )
return result
def A ( ) -> Dict:
'''simple docstring'''
__snake_case = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
__snake_case = re.sub(r'^gw' , '' , lowerCamelCase_ , 0 , re.M )
return int(lowerCamelCase_ )
def A ( ) -> int:
'''simple docstring'''
__snake_case = 2_9500
__snake_case = pytest_xdist_worker_id()
return port + uniq_delta
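# Example: xdist worker "gw3" -> worker id 3 -> port 29500 + 3 = 29503, so
# concurrent torch.distributed test runs on one host never collide on a
# rendezvous port.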
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Optional[int] , lowerCamelCase_: Union[str, Any] ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
snake_case : List[str] = flax_key_tuple[:-1] + ("weight",)
snake_case : Union[str, Any] = torch.permute(lowerCamelCase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCamelCase_ ):
# linear layer
snake_case : int = flax_key_tuple[:-1] + ("weight",)
snake_case : Dict = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case : Any = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
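# Example of the key/tensor renaming above (shapes assumed): a 2-D dense kernel
#   ("encoder", "layers_0", "kernel") -> ("encoder", "layers_0", "weight")
# with the matrix transposed, since Flax stores kernels as (in, out) while
# torch.nn.Linear weights are (out_features, in_features); 3-D expert-layer
# kernels are instead permuted with (0, 2, 1).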
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Union[str, Any] , lowerCamelCase_: Union[str, Any] , lowerCamelCase_: str ):
"""simple docstring"""
if "metadata" in layer:
snake_case : Dict = layer.split("metadata" )
snake_case : Optional[Any] = "".join(split_layer[0] )[:-1]
snake_case : Any = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
snake_case : List[str] = layer.split("kvstore" )
snake_case : Tuple = "".join(split_layer[0] )[:-1]
snake_case : Union[str, Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
snake_case : List[Any] = layer.split("/" )
snake_case : Union[str, Any] = "/".join(split_layer[:-1] )
snake_case : int = (split_layer[-1],)
if "kvstore/path" in layer:
snake_case : str = f'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
snake_case : Tuple = "file"
else:
snake_case : int = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Optional[Any] , lowerCamelCase_: Dict ):
"""simple docstring"""
snake_case : Optional[int] = rename_keys(lowerCamelCase_ )
snake_case : str = {}
for k, v in current_block.items():
snake_case : List[str] = v
snake_case : List[str] = new_current_block
torch.save(lowerCamelCase_ , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Tuple , lowerCamelCase_: Optional[Any] , lowerCamelCase_: Dict , lowerCamelCase_: int , lowerCamelCase_: str = WEIGHTS_NAME ):
"""simple docstring"""
snake_case : List[str] = convert_file_size_to_int(lowerCamelCase_ )
snake_case : List[Any] = []
snake_case : Dict = {}
snake_case : str = 0
snake_case : List[str] = 0
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
snake_case : List[Any] = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
snake_case : Union[str, Any] = flatten_dict(lowerCamelCase_ , sep="/" )
snake_case : Optional[int] = {}
for layer in checkpoint_info.keys():
snake_case , snake_case , snake_case : Union[str, Any] = get_key_and_tensorstore_dict(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if curr_real_layer_name in all_layers:
snake_case : str = content
else:
snake_case : Any = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
snake_case : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
snake_case : Tuple = torch.tensor(lowerCamelCase_ )
snake_case : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
snake_case , snake_case : Dict = rename_base_flax_keys(tuple(key.split("/" ) ) , lowerCamelCase_ )
snake_case : Union[str, Any] = "/".join(lowerCamelCase_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
snake_case : str = os.path.join(
lowerCamelCase_ , weights_name.replace(".bin" , f'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
snake_case : Any = {}
snake_case : Union[str, Any] = 0
snake_case : Any = raw_weights.to(getattr(lowerCamelCase_ , lowerCamelCase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
snake_case : List[Any] = os.path.join(lowerCamelCase_ , weights_name.replace(".bin" , f'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowerCamelCase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
snake_case : List[Any] = {}
snake_case : Dict = {}
for idx, shard in enumerate(lowerCamelCase_ ):
snake_case : List[Any] = weights_name.replace(
".bin" , f'''-{idx+1:05d}-of-{len(lowerCamelCase_ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
snake_case : Tuple = os.path.join(lowerCamelCase_ , weights_name.replace(".bin" , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
snake_case : Union[str, Any] = shard
for key in shard:
snake_case : List[Any] = shard_file
# Add the metadata
snake_case : Optional[int] = {"total_size": total_size}
snake_case : List[str] = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , "w" , encoding="utf-8" ) as f:
snake_case : Tuple = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + "\n"
f.write(lowerCamelCase_ )
return metadata, index
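# The emitted index file uses the standard sharded-checkpoint layout, e.g.:
#   {"metadata": {"total_size": 123456},
#    "weight_map": {"shared.weight": "pytorch_model-00001-of-00072.bin", ...}}
# so that from_pretrained can later locate each parameter's shard lazily.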
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
A = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
snake_case : List[Any] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
snake_case : List[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
snake_case : Dict = TaTokenizer.from_pretrained("t5-small" )
snake_case : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
snake_case : Dict = tokenizer(lowerCamelCase_ , return_tensors="pt" ).input_ids
snake_case : Optional[int] = model.generate(lowerCamelCase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class _snake_case ( _a , _a , _a , unittest.TestCase ):
_A : Dict = StableUnCLIPPipeline
_A : Any = TEXT_TO_IMAGE_PARAMS
_A : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
_A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_A : List[Any] = False
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:Dict = 32
SCREAMING_SNAKE_CASE:List[Any] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:Optional[int] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=SCREAMING_SNAKE_CASE__ ,projection_dim=SCREAMING_SNAKE_CASE__ ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:int = PriorTransformer(
num_attention_heads=2 ,attention_head_dim=12 ,embedding_dim=SCREAMING_SNAKE_CASE__ ,num_layers=1 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:Dict = DDPMScheduler(
variance_type="fixed_small_log" ,prediction_type="sample" ,num_train_timesteps=1_000 ,clip_sample=SCREAMING_SNAKE_CASE__ ,clip_sample_range=5.0 ,beta_schedule="squaredcos_cap_v2" ,)
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:Optional[int] = StableUnCLIPImageNormalizer(embedding_dim=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:int = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=SCREAMING_SNAKE_CASE__ ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:str = UNetaDConditionModel(
sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") ,up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") ,block_out_channels=(32, 64) ,attention_head_dim=(2, 4) ,class_embed_type="projection" ,projection_class_embeddings_input_dim=embedder_projection_dim * 2 ,cross_attention_dim=SCREAMING_SNAKE_CASE__ ,layers_per_block=1 ,upcast_attention=SCREAMING_SNAKE_CASE__ ,use_linear_projection=SCREAMING_SNAKE_CASE__ ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:Union[str, Any] = DDIMScheduler(
beta_schedule="scaled_linear" ,beta_start=0.00_085 ,beta_end=0.012 ,prediction_type="v_prediction" ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE:List[str] = AutoencoderKL()
SCREAMING_SNAKE_CASE:Optional[int] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith("mps" ):
SCREAMING_SNAKE_CASE:Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE:Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE:List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE:int = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : str ):
SCREAMING_SNAKE_CASE:List[str] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=SCREAMING_SNAKE_CASE__ )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any ):
SCREAMING_SNAKE_CASE:List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
SCREAMING_SNAKE_CASE:Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" ,torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE:Dict = torch.Generator(device="cpu" ).manual_seed(0 )
SCREAMING_SNAKE_CASE:Optional[int] = pipe("anime turle" ,generator=SCREAMING_SNAKE_CASE__ ,output_type="np" )
SCREAMING_SNAKE_CASE:Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Tuple ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE:Tuple = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" ,torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE:List[str] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE:Any = pipe(
"anime turtle" ,prior_num_inference_steps=2 ,num_inference_steps=2 ,output_type="np" ,)
SCREAMING_SNAKE_CASE:Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def A_ ( snake_case ):
@wraps(snake_case )
def _inner_fn(*snake_case , **snake_case ):
warnings.warn(
(F'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UserWarning , )
return fn(*snake_case , **snake_case )
return _inner_fn
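# Hypothetical usage of the decorator above:
#   @experimental
#   def resolve_cache(path):
#       ...
# Calling resolve_cache("x") first emits a UserWarning that the API may change,
# then delegates to the wrapped function unchanged.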
'''simple docstring'''
import datasets
from .evaluate import evaluate
_a : Optional[Any] = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n'
_a : str = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n'
_a : Optional[int] = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ),codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""],)
    def _compute(self, predictions, references):
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| 689 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def a__ ( ) -> Tuple:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os, _PatchedModuleObj )
assert isinstance(_test_patching.os.path, _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path, _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os, _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path, _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def a__ ( ) -> Tuple:
"""simple docstring"""
assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def a__ ( ) -> Optional[int]:
"""simple docstring"""
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def a__ ( ) -> List[str]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def a__ ( ) -> str:
"""simple docstring"""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
pass
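
# Compact recap (not in the original suite) of the contract the tests above
# verify: inside the context the attribute is replaced, afterwards the
# original is restored.
def test_patch_submodule_illustration():
    from os.path import join as original_join

    mock_join = "__illustration_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        assert _test_patching.os.path.join is mock_join
    assert _test_patching.join is original_join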
| 98 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : Dict = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class snake_case_ ( _A ):
'''simple docstring'''
lowerCamelCase = "umt5"
lowerCamelCase = ["past_key_values"]
def __init__( self : Union[str, Any] , __magic_name__ : Optional[int]=25_0112 , __magic_name__ : Optional[Any]=512 , __magic_name__ : Dict=64 , __magic_name__ : Dict=1024 , __magic_name__ : Optional[Any]=8 , __magic_name__ : Union[str, Any]=None , __magic_name__ : str=6 , __magic_name__ : List[Any]=32 , __magic_name__ : Tuple=128 , __magic_name__ : Dict=0.1 , __magic_name__ : Union[str, Any]=1e-6 , __magic_name__ : Tuple=1.0 , __magic_name__ : int="gated-gelu" , __magic_name__ : Optional[int]=True , __magic_name__ : int=True , __magic_name__ : Tuple="T5Tokenizer" , __magic_name__ : Union[str, Any]=True , __magic_name__ : Dict=0 , __magic_name__ : Dict=1 , __magic_name__ : str=0 , **__magic_name__ : Optional[int] , ) -> Optional[Any]:
super().__init__(
is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
lowerCamelCase_ : str = vocab_size
lowerCamelCase_ : Any = d_model
lowerCamelCase_ : Optional[Any] = d_kv
lowerCamelCase_ : Dict = d_ff
lowerCamelCase_ : Dict = num_layers
lowerCamelCase_ : Dict = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCamelCase_ : Union[str, Any] = num_heads
lowerCamelCase_ : Optional[Any] = relative_attention_num_buckets
lowerCamelCase_ : Any = relative_attention_max_distance
lowerCamelCase_ : List[Any] = dropout_rate
lowerCamelCase_ : Optional[int] = layer_norm_epsilon
lowerCamelCase_ : Optional[int] = initializer_factor
lowerCamelCase_ : Dict = feed_forward_proj
lowerCamelCase_ : List[str] = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError(
F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
lowerCamelCase_ : Any = "gelu_new"
@property
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
return self.d_model
@property
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
return self.num_heads
@property
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
return self.num_layers
class snake_case_ ( _A ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]:
lowerCamelCase_ : List[str] = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCamelCase_ : Tuple = "past_encoder_sequence + sequence"
lowerCamelCase_ : str = {0: "batch"}
lowerCamelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCamelCase_ : List[str] = {0: "batch", 1: "decoder_sequence"}
lowerCamelCase_ : Optional[Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return 13
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float:
return 5e-4
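
# Standalone sketch (not in the original file) of the `feed_forward_proj`
# parsing in the __init__ above: "gated-gelu" splits into an activation name
# and a gating flag.
_example_proj = "gated-gelu"
_act_info = _example_proj.split("-")
assert (_act_info[-1], _act_info[0] == "gated") == ("gelu", True)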
| 713 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = "informer"
lowerCamelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Optional[Any] , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[int] = None , __magic_name__ : str = "student_t" , __magic_name__ : str = "nll" , __magic_name__ : int = 1 , __magic_name__ : List[int] = None , __magic_name__ : Optional[Union[str, bool]] = "mean" , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : int = 0 , __magic_name__ : Optional[List[int]] = None , __magic_name__ : Optional[List[int]] = None , __magic_name__ : int = 64 , __magic_name__ : int = 32 , __magic_name__ : int = 32 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : int = 2 , __magic_name__ : bool = True , __magic_name__ : str = "gelu" , __magic_name__ : float = 0.05 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : float = 0.1 , __magic_name__ : int = 100 , __magic_name__ : float = 0.02 , __magic_name__ : Optional[int]=True , __magic_name__ : str = "prob" , __magic_name__ : int = 5 , __magic_name__ : bool = True , **__magic_name__ : Tuple , ) -> List[str]:
# time series specific configuration
lowerCamelCase_ : Tuple = prediction_length
lowerCamelCase_ : str = context_length or prediction_length
lowerCamelCase_ : Union[str, Any] = distribution_output
lowerCamelCase_ : List[str] = loss
lowerCamelCase_ : Tuple = input_size
lowerCamelCase_ : int = num_time_features
lowerCamelCase_ : List[str] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCamelCase_ : Optional[int] = scaling
lowerCamelCase_ : str = num_dynamic_real_features
lowerCamelCase_ : List[str] = num_static_real_features
lowerCamelCase_ : Any = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowerCamelCase_ : Dict = cardinality
else:
lowerCamelCase_ : Optional[Any] = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__magic_name__ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowerCamelCase_ : Dict = embedding_dimension
else:
lowerCamelCase_ : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase_ : Dict = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase_ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase_ : int = d_model
lowerCamelCase_ : Union[str, Any] = encoder_attention_heads
lowerCamelCase_ : int = decoder_attention_heads
lowerCamelCase_ : Union[str, Any] = encoder_ffn_dim
lowerCamelCase_ : Union[str, Any] = decoder_ffn_dim
lowerCamelCase_ : Dict = encoder_layers
lowerCamelCase_ : str = decoder_layers
lowerCamelCase_ : Dict = dropout
lowerCamelCase_ : Optional[int] = attention_dropout
lowerCamelCase_ : Dict = activation_dropout
lowerCamelCase_ : List[Any] = encoder_layerdrop
lowerCamelCase_ : Optional[Any] = decoder_layerdrop
lowerCamelCase_ : Optional[int] = activation_function
lowerCamelCase_ : int = init_std
lowerCamelCase_ : str = use_cache
# Informer
lowerCamelCase_ : str = attention_type
lowerCamelCase_ : Union[str, Any] = sampling_factor
lowerCamelCase_ : List[Any] = distil
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
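
# Worked example (not in the original file) of the feature count computed by
# the property above, for a hypothetical config: cardinality=[10] defaults
# embedding_dimension to [min(50, (10 + 1) // 2)] == [5]; with 2 dynamic real
# features, 3 time features, 1 static real feature and input_size=1, the
# loc/scale terms add input_size * 2:
assert sum([min(50, (10 + 1) // 2)]) + 2 + 3 + 1 + 1 * 2 == 13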
| 253 | 0 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
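
    # Quick sanity check (not in the original file): the restored sort agrees
    # with Python's sorted().
    example = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
    assert sort(list(example)) == sorted(example)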
| 157 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __A ( A_ , A_ , unittest.TestCase ):
UpperCamelCase :Tuple = IFInpaintingSuperResolutionPipeline
UpperCamelCase :int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase :Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
UpperCamelCase :Optional[Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _snake_case (self ):
return self._get_superresolution_dummy_components()
def _snake_case (self , __magic_name__ , __magic_name__=0 ):
if str(__magic_name__ ).startswith("""mps""" ):
lowerCamelCase__ : Dict = torch.manual_seed(__magic_name__ )
else:
lowerCamelCase__ : Tuple = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
lowerCamelCase__ : Dict = floats_tensor((1, 3, 16, 16) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
lowerCamelCase__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
lowerCamelCase__ : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
lowerCamelCase__ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case (self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _snake_case (self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _snake_case (self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)
def _snake_case (self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _snake_case (self ):
self._test_save_load_local()
def _snake_case (self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 157 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
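# Lazy-module effect (illustrative note, not part of the file): nothing from
# modeling_swinv2 is imported until an attribute is first accessed, e.g.
#   from transformers.models.swinv2 import Swinv2Config
# only then does _LazyModule import the submodule behind the scenes.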
| 683 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase = ['''text''', '''image''', '''audio''']
def create_inputs(input_types):
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input")
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((5_12, 5_12)))
elif input_type == "audio":
inputs.append(torch.ones(30_00))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
else:
raise ValueError(F'''Invalid type requested: {input_type}''')
return inputs
def output_types(outputs):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
output_types.append("audio")
else:
raise ValueError(F'''Invalid output: {output}''')
return output_types
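
# Quick illustration (not in the original file) of the helper above: a plain
# string maps to the "text" agent type.
assert output_types(["Text input"]) == ["text"]
# inputs = create_inputs(["text", "image"])  # requires the fixture files on disk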
@is_tool_test
class __snake_case :
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"inputs" ) )
self.assertTrue(hasattr(self.tool ,"outputs" ) )
lowerCAmelCase_ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input ,lowerCAmelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase_ : Optional[int] = [outputs]
self.assertListEqual(output_types(lowerCAmelCase__ ) ,self.tool.outputs )
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"description" ) )
self.assertTrue(hasattr(self.tool ,"default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : str = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase__ ,self.tool.outputs ):
lowerCAmelCase_ : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = []
for _input, input_type in zip(lowerCAmelCase__ ,self.tool.inputs ):
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : int = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
| 683 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_a : List[str] = "src/transformers"
_a : Dict = "docs/source/en"
_a : Optional[Any] = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between lines beginning with `start_prompt` and `end_prompt`."""
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1

    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_a : Optional[int] = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_a : str = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_a : Tuple = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_a : Optional[Any] = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_a : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a cell of the given `width`; ✅/❌ count as width 2."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    # Dictionary of model names to configs.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Check the model table in `index.md` is consistent with the state of the lib and maybe fix it."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 56 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def partition(number_to_partition: int) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution(number_unique_partitions: int = 5_000) -> int | None:
    """Return the smallest integer with more than `number_unique_partitions` prime partitions."""
    for number_to_partition in range(1, number_unique_partitions):
        if len(partition(number_to_partition)) > number_unique_partitions:
return number_to_partition
return None
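
# Worked example (not in the original file): 10 is the first integer with more
# than four prime partitions (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5), so
assert solution(4) == 10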
if __name__ == "__main__":
print(f'''{solution() = }''')
| 56 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = "naver-clova-ix/donut-base-finetuned-docvqa"
lowerCAmelCase__ : Dict = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
lowerCAmelCase__ : Any = "document_qa"
lowerCAmelCase__ : Tuple = AutoProcessor
lowerCAmelCase__ : int = VisionEncoderDecoderModel
lowerCAmelCase__ : Optional[int] = ["image", "text"]
lowerCAmelCase__ : Optional[Any] = ["text"]
def __init__( self : int , *snake_case : List[Any] , **snake_case : Optional[Any] ):
if not is_vision_available():
raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' )
super().__init__(*snake_case , **snake_case )
def snake_case ( self : Union[str, Any] , snake_case : "Image" , snake_case : str ):
__UpperCamelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
__UpperCamelCase = task_prompt.replace('''{user_input}''' , snake_case )
__UpperCamelCase = self.pre_processor.tokenizer(
snake_case , add_special_tokens=snake_case , return_tensors='''pt''' ).input_ids
__UpperCamelCase = self.pre_processor(snake_case , return_tensors='''pt''' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def snake_case ( self : str , snake_case : Union[str, Any] ):
return self.model.generate(
inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=snake_case , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=snake_case , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=snake_case , ).sequences
def snake_case ( self : int , snake_case : List[str] ):
__UpperCamelCase = self.pre_processor.batch_decode(snake_case )[0]
__UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' )
__UpperCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' )
__UpperCamelCase = re.sub(R'''<.*?>''' , '''''' , snake_case , count=1 ).strip() # remove first task start token
        __UpperCamelCase = self.pre_processor.token2json(snake_case )
return sequence["answer"]
| 717 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Any = ""
lowerCAmelCase__ : Any = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : str , snake_case : Optional[DatasetInfo] = None , snake_case : Optional[str] = None , **snake_case : List[Any] , ):
super().__init__(self , **snake_case )
__UpperCamelCase = repo_info
__UpperCamelCase = token
__UpperCamelCase = None
def snake_case ( self : List[Any] ):
if self.dir_cache is None:
__UpperCamelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__UpperCamelCase = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(snake_case ): {'''name''': str(snake_case ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def snake_case ( self : Dict , snake_case : str , snake_case : str = "rb" , **snake_case : Union[str, Any] , ):
if not isinstance(self.repo_info , snake_case ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
__UpperCamelCase = hf_hub_url(self.repo_info.id , snake_case , revision=self.repo_info.sha )
return fsspec.open(
snake_case , mode=snake_case , headers=get_authentication_headers_for_url(snake_case , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def snake_case ( self : Optional[Any] , snake_case : Tuple , **snake_case : List[Any] ):
self._get_dirs()
__UpperCamelCase = self._strip_protocol(snake_case )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(snake_case )
def snake_case ( self : List[str] , snake_case : int , snake_case : Tuple=False , **snake_case : Dict ):
self._get_dirs()
__UpperCamelCase = PurePosixPath(path.strip('''/''' ) )
__UpperCamelCase = {}
for p, f in self.dir_cache.items():
__UpperCamelCase = PurePosixPath(p.strip('''/''' ) )
__UpperCamelCase = p.parent
if root == path:
__UpperCamelCase = f
__UpperCamelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 375 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Directed graph of transition probabilities for a Markov chain (name restored from the call site below)."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transition(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    """Run `steps` random transitions from `start` and count node visits (name follows the upstream version)."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
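
# Usage sketch (not in the original file) of the restored helpers above:
example_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
example_counts = get_transition("a", example_transitions, 1000)  # Counter of node visits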
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)])
def _a ( self , lowercase_) -> int:
__snake_case = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_ , config_name=lowercase_)
__snake_case = GenerationConfig.from_pretrained(lowercase_ , config_name=lowercase_)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowercase_)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , lowercase_)
def _a ( self) -> Optional[int]:
__snake_case = AutoConfig.from_pretrained('gpt2')
__snake_case = GenerationConfig.from_model_config(lowercase_)
__snake_case = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowercase_ , lowercase_)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _a ( self) -> str:
__snake_case = GenerationConfig()
__snake_case = {
'max_new_tokens': 1_0_2_4,
'foo': 'bar',
}
__snake_case = copy.deepcopy(lowercase_)
__snake_case = generation_config.update(**lowercase_)
# update_kwargs was not modified (no side effects)
self.assertEqual(lowercase_ , lowercase_)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowercase_ , {'foo': 'bar'})
def _a ( self) -> Optional[Any]:
__snake_case = GenerationConfig()
__snake_case = 'bar'
with tempfile.TemporaryDirectory('test-generation-config') as tmp_dir:
generation_config.save_pretrained(lowercase_)
__snake_case = GenerationConfig.from_pretrained(lowercase_)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar')
__snake_case = GenerationConfig.from_model_config(lowercase_)
assert not hasattr(lowercase_ , 'foo') # no new kwargs should be initialized if from config
def _a ( self) -> Optional[Any]:
__snake_case = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , lowercase_)
self.assertEqual(default_config.num_beams , 1)
__snake_case = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , lowercase_)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowercase_)
__snake_case = GenerationConfig.from_pretrained(lowercase_ , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , lowercase_)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def _a ( cls) -> List[str]:
__snake_case = TOKEN
HfFolder.save_token(lowercase_)
@classmethod
def _a ( cls) -> Dict:
try:
delete_repo(token=cls._token , repo_id='test-generation-config')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org')
except HTTPError:
pass
def _a ( self) -> List[Any]:
__snake_case = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token)
__snake_case = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id='test-generation-config' , push_to_hub=lowercase_ , use_auth_token=self._token)
__snake_case = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
def _a ( self) -> str:
__snake_case = GenerationConfig(
do_sample=lowercase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token)
__snake_case = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowercase_ , repo_id='valid_org/test-generation-config-org' , push_to_hub=lowercase_ , use_auth_token=self._token)
__snake_case = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowercase_ , getattr(lowercase_ , lowercase_))
| 313 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
"""configuration_xlm_roberta""": [
"""XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaConfig""",
"""XLMRobertaOnnxConfig""",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""XLMRobertaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""XLMRobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 707 |
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
1_0: """a""",
1_1: """b""",
1_2: """c""",
1_3: """d""",
1_4: """e""",
1_5: """f""",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert an integer-valued number to a signed hexadecimal string (name restored)."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
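
# Examples for the restored converter above (not in the original file):
assert decimal_to_hexadecimal(26) == "0x1a"
assert decimal_to_hexadecimal(-256) == "-0x100"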
if __name__ == "__main__":
import doctest
doctest.testmod()
| 239 | 0 |
def solution(n: int = 1_000) -> int:
    """Return the product a * b * c of a Pythagorean triplet with a + b + c == n, or -1 if none exists."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
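
# Worked example (not in the original file): for n == 12 the only Pythagorean
# triplet is (3, 4, 5), since 3 + 4 + 5 == 12 and 9 + 16 == 25, so
assert solution(12) == 3 * 4 * 5 == 60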
if __name__ == "__main__":
print(F'''{solution() = }''')
| 60 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : str ) -> int:
for attribute in key.split('.' ):
A = getattr(lowerCAmelCase, lowerCAmelCase )
if weight_type is not None:
A = getattr(lowerCAmelCase, lowerCAmelCase ).shape
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Dict:
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase )[0].split('.' )[-2]
A = mapped_key.replace('*', lowerCAmelCase )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 699 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : int = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : List[str] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
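# Rough illustration of the lazy-import behavior set up above (module path is
# an assumption; nothing heavy is imported until an attribute is accessed):
#
#   import transformers.models.albert as albert  # cheap: only the skeleton loads
#   albert.AlbertConfig                          # first access triggers the real import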
| 354 |
def different_signs(num_1: int, num_2: int) -> bool:
    """
    Return True if the two integers have opposite signs (XOR flips the sign bit).

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num_1 ^ num_2 < 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 354 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}


class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
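# Minimal usage sketch (assumes network access to the public checkpoint):
#
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tok("PEGASUS uses gap-sentence pre-training.").input_ids
#   assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends </s>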
| 35 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 694 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowercase : Any = """CompVis/stable-diffusion-v1-1"""
lowercase : Any = """CompVis/stable-diffusion-v1-2"""
lowercase : List[str] = """CompVis/stable-diffusion-v1-3"""
lowercase : str = """CompVis/stable-diffusion-v1-4"""
class __A( __UpperCAmelCase ):
def __init__( self, A, A, A, A, A, A, A, A = True, ):
"""simple docstring"""
super()._init_()
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(A )
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(A )
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(A )
_UpperCamelCase = StableDiffusionPipeline(
vae=A, text_encoder=A, tokenizer=A, unet=A, scheduler=A, safety_checker=A, feature_extractor=A, requires_safety_checker=A, )
self.register_modules(pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea, pipelinea=self.pipea )
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
| 105 |
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
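# Sketch of how these criteria are typically wired into generation
# (`model` and `input_ids` are placeholders):
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=32), MaxTimeCriteria(max_time=2.0)])
#   model.generate(input_ids, stopping_criteria=criteria)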
| 105 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase_ = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 209 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
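# Quick sanity check of the class above (not part of the original module):
#
#   m = Matrix([[1, 2], [3, 4]])
#   m.order          # (2, 2)
#   m.is_square      # True
#   m.determinant()  # 1 * 4 - 2 * 3 == -2
#   m.columns()      # [[1, 3], [2, 4]]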
| 225 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """
    Return the root-mean-square speed (in m/s) of a gas molecule at the given
    temperature (K) and molar mass (kg/mol), using v_rms = sqrt(3RT/M).
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 71 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, offset=True, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, offset=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST):
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, offset=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 71 | 1 |
"""simple docstring"""
import argparse
import os
import re
lowercase__ : List[str] = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowercase__ : Dict = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
lowercase__ : Dict = re.compile(r'''\s*\(\s*\"(\S[^\"]+)\"''')
def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to"
            " fix this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
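# Typical invocations from a repository checkout (paths are assumptions):
#
#   python utils/sort_auto_mappings.py               # rewrite the auto mappings in place
#   python utils/sort_auto_mappings.py --check_only  # raise instead of fixing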
| 701 |
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
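# Example shell invocations once the entry point is installed (the subcommands
# come from the register_subcommand calls above):
#
#   datasets-cli env
#   datasets-cli test ./datasets/<dataset_name> --save_infos --all_configs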
| 485 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def __UpperCAmelCase ( self ):
"""simple docstring"""
A_ = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ = load_dataset('''hf-internal-testing/fixtures_docvqa''' ,split='''test''' )
A_ = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
A_ = image_processing(lowerCAmelCase_ ,return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
A_ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 
5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
expected_boxes = A_  # A_ now holds the bounding-box fixture defined just above

self.assertListEqual(encoding.words, expected_words)
self.assertListEqual(encoding.boxes, expected_boxes)

# with apply_OCR = False
image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

encoding = image_processing(image, return_tensors="pt")  # `image` is loaded earlier in this test

self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
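# Usage sketch (illustrative, not part of the test suite): with apply_ocr=True
# (the default) the processor runs Tesseract OCR and returns `words` and
# normalized `boxes` alongside the resized pixel values; with apply_ocr=False
# it only returns pixel values.
#
#   processor = LayoutLMvaImageProcessor()            # OCR enabled
#   encoding = processor(image, return_tensors="pt")
#   encoding.words, encoding.boxes                    # OCR output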
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. They should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
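# Minimal usage sketch (illustrative): these pipelines are normally built via
# `transformers.pipeline`; the checkpoints resolved for each task are library
# defaults, not part of this module.
#
#   from transformers import pipeline
#
#   summarizer = pipeline("summarization")
#   summarizer("A long article ...", max_length=56)
#
#   translator = pipeline("translation_en_to_fr")
#   translator("How old are you?")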
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger


class LmSeqsDataset(Dataset):
    """Wraps the pre-tokenized sequences used for distillation."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sub-sequences."""
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"Splitting {sum(idxs)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process does this."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
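# Minimal usage sketch (illustrative, not from the original file): the dataset
# is meant to be consumed through a DataLoader with `batch_sequences` as the
# collate_fn. `params` is assumed to expose the attributes used above
# (max_model_input_size, mlm, special_tok_ids, is_master).
#
#   from torch.utils.data import DataLoader
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#   loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=dataset.batch_sequences)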
| 703 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Also known as the logistic function; maps any real number into (0, 1)."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy (log-loss) between predictions h and labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
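# Illustrative sanity check (not in the original script): on trivially
# separable 1-D data gradient descent drives the weight positive, so the
# predicted probability for a clearly positive point exceeds 0.5. The helper
# is deliberately never called.
def _toy_sanity_check():
    x_toy = np.array([[-2.0], [-1.0], [1.0], [2.0]])
    y_toy = np.array([0, 0, 1, 1])
    theta_toy = logistic_reg(0.1, x_toy, y_toy, max_iterations=1000)
    assert sigmoid_function(np.dot(np.array([1.5]), theta_toy)) > 0.5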
# In[68]:
if __name__ == "__main__":
__UpperCAmelCase = datasets.load_iris()
__UpperCAmelCase = iris.data[:, :2]
__UpperCAmelCase = (iris.target != 0) * 1
__UpperCAmelCase = 0.1
__UpperCAmelCase = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def _lowerCamelCase ( A_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return sigmoid_function(
np.dot(A_ , A_ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
((__UpperCAmelCase) , (__UpperCAmelCase)) = (x[:, 0].min(), x[:, 0].max())
((__UpperCAmelCase) , (__UpperCAmelCase)) = (x[:, 1].min(), x[:, 1].max())
((__UpperCAmelCase) , (__UpperCAmelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__UpperCAmelCase = np.c_[xxa.ravel(), xxa.ravel()]
__UpperCAmelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
"""simple docstring"""
from collections import deque

class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque[Process], time_slice: int) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])

        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
import doctest
_lowercase : List[str] = Process('P1', 0, 53)
_lowercase : str = Process('P2', 0, 17)
_lowercase : Union[str, Any] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : Any = 3
_lowercase : Union[str, Any] = [17, 25]
_lowercase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
_lowercase : Optional[Any] = Process('P1', 0, 53)
_lowercase : Tuple = Process('P2', 0, 17)
_lowercase : Optional[int] = Process('P3', 0, 68)
_lowercase : int = Process('P4', 0, 24)
_lowercase : int = 3
_lowercase : int = [17, 25]
_lowercase : List[str] = deque([Pa, Pa, Pa, Pa])
_lowercase : List[Any] = MLFQ(number_of_queues, time_slices, queue, 0)
_lowercase : str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
'''simple docstring'''
def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
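# Quick check (illustrative): the primes below 10 are 2, 3, 5 and 7, so
# solution(10) == 17. The sieve runs in O(n log log n) time and O(n) space.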
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
def bfs(graph: list[list[int]], s: int, t: int, parent: list[int]) -> bool:
    # Return True if the sink is reachable from the source in the residual graph.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph: list[list[int]], source: int, sink: int) -> int:
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
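# For this classic CLRS example network the computed maximum flow is 23.
# Picking augmenting paths with BFS makes this the Edmonds-Karp variant,
# which runs in O(V * E^2).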
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
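# Minimal usage sketch (illustrative, outside the mocked test setting):
#
#   ds = Dataset.from_dict({"text": ["hello", "world"]})
#   ds = ds.map(lambda ex: {"vecs": embed(ex["text"])})  # embed() is your own model
#   ds.add_faiss_index(column="vecs")
#   scores, retrieved = ds.get_nearest_examples("vecs", query_vector, k=2)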
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Element-wise sigmoid linear unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)
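# Illustrative values: sigmoid(0) == 0.5 and sigmoid_linear_unit(0) == 0,
# since SiLU(x) = x * sigmoid(x). For example:
#
#   sigmoid(np.array([0.0]))              # -> array([0.5])
#   sigmoid_linear_unit(np.array([2.0]))  # -> array([1.76159...])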
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """
    Given a prime number, returns number + 2 (its twin prime) if that is also
    prime, and -1 otherwise.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
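# Examples (illustrative): twin_prime(3) returns 5 because 3 and 5 are both
# prime, while twin_prime(4) returns -1 because 4 is not prime.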
if __name__ == "__main__":
import doctest
    doctest.testmod()
def catalan(number: int) -> int:
    """Returns the `number`-th Catalan number (1-indexed, so catalan(1) == 1)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
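# The sequence produced is 1, 1, 2, 5, 14, ... (1-indexed), so for example
# catalan(5) == 14.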
if __name__ == "__main__":
import doctest
    doctest.testmod()
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """
    Output of AutoencoderKL encoding method.
    """

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        # Returns a dict of all attention processors used in the model, indexed by weight name.
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
return DecoderOutput(sample=UpperCamelCase_ ) | 704 | A = 'Alexander Joslin'
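# Standalone sketch of the seam blending used by blend_v/blend_h above: the
# overlapping rows are linearly cross-faded, with the weight ramping from 0
# toward 1 across the overlap. The tensors here are toy stand-ins.
import torch


def blend_rows(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        weight = y / blend_extent  # 0.0 at the first overlapped row
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - weight) + b[:, :, y, :] * weight
    return b


top, bottom = torch.zeros(1, 1, 4, 4), torch.ones(1, 1, 4, 4)
blended = blend_rows(top, bottom, blend_extent=2)
print(blended[0, 0, :, 0].tolist())  # [0.0, 0.5, 1.0, 1.0] -- a smooth seam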
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    # Map each operator symbol to its binary function.
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis pops one operator and two
            # operands, applies the operator, and pushes the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5: the single value left on the operand stack is the answer
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
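# Self-contained trace of the evaluator above: each ")" triggers exactly one
# reduction, so "(5 + ((4 * 2) * (2 + 3)))" evaluates inside-out:
#   ")" #1: 4 * 2 = 8
#   ")" #2: 2 + 3 = 5
#   ")" #3: 8 * 5 = 40
#   ")" #4: 5 + 40 = 45
# A minimal list-backed stack (an assumption here; the repository's Stack
# class offers the same push/pop/peek surface) is enough to run it:
class ListStack:
    def __init__(self) -> None:
        self._items: list = []

    def push(self, item) -> None:
        self._items.append(item)

    def pop(self) -> None:
        self._items.pop()

    def peek(self):
        return self._items[-1]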
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
A_ = ["image_processor", "tokenizer"]
A_ = "LayoutLMv2ImageProcessor"
A_ = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self , __a=None , __a=None , **__a ):
'''simple docstring'''
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase_ , )
__a : List[Any] = kwargs.pop('feature_extractor' )
__a : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __call__( self , __a , __a = None , __a = None , __a = None , __a = None , __a = True , __a = False , __a = None , __a = None , __a = 0 , __a = None , __a = None , __a = None , __a = False , __a = False , __a = False , __a = False , __a = True , __a = None , **__a , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes '
'if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('You cannot return overflowing tokens without returning the offsets mapping.' )
# first, apply the image processor
__a : Optional[Any] = self.image_processor(images=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__a : Optional[int] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a : Union[str, Any] = features["words"]
__a : Tuple = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
# add pixel values
__a : int = features.pop('pixel_values' )
if return_overflowing_tokens is True:
__a : Union[str, Any] = self.get_overflowing_images(lowerCAmelCase_ , encoded_inputs['overflow_to_sample_mapping'] )
__a : Optional[Any] = images
return encoded_inputs
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
__a : List[Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f""" {len(lowerCAmelCase_ )} and {len(lowerCAmelCase_ )}""" )
return images_with_overflow
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __UpperCAmelCase ( self , *__a , **__a ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase_ , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCAmelCase_ , )
return self.image_processor
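# Hypothetical usage sketch for the processor above (the checkpoint name is a
# real Hub repository; "document.png" is a placeholder path). With apply_ocr
# left at the image processor's default, words and boxes come from OCR, so
# only the page image is passed in.
from PIL import Image
from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
page = Image.open("document.png").convert("RGB")
encoding = processor(page, return_tensors="pt")
print(sorted(encoding.keys()))  # attention_mask, bbox, image, input_ids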
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
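# A minimal illustration of the comparison path above: a range requirement
# such as "packaging>=20.0,!=0.0" splits on "," into (op, version) pairs, and
# each pair is checked with the matching operator from the `ops` table.
# (Assumes a reasonably recent `packaging` install is present.)
if __name__ == "__main__":
    got_ver = importlib.metadata.version("packaging")
    for op, want_ver in ((">=", "20.0"), ("!=", "0.0")):
        assert ops[op](version.parse(got_ver), version.parse(want_ver))
    print(f"packaging=={got_ver} satisfies >=20.0,!=0.0")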
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCAmelCase( __lowerCamelCase ):
if isinstance(__lowerCamelCase , torch.Tensor ):
return image
elif isinstance(__lowerCamelCase , PIL.Image.Image ):
__a = [image]
__a = [trans(img.convert('RGB' ) ) for img in image]
__a = torch.stack(__lowerCamelCase )
return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
def __init__( self , UpperCAmelCase , UpperCAmelCase ) -> str:
super().__init__()
# make sure scheduler can always be converted to DDIM
__a = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> str:
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , UpperCAmelCase )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> List[str]:
if not isinstance(UpperCAmelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCAmelCase )}''' )
__a = image.to(device=UpperCAmelCase , dtype=UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ) and len(UpperCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(UpperCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
__a = init_latents.shape
__a = randn_tensor(UpperCAmelCase , generator=UpperCAmelCase , device=UpperCAmelCase , dtype=UpperCAmelCase )
# get latents
print('add noise to latents at timestep' , UpperCAmelCase )
__a = self.scheduler.add_noise(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
__a = init_latents
return latents
@torch.no_grad()
def __call__( self , UpperCAmelCase = None , UpperCAmelCase = 0.8 , UpperCAmelCase = 1 , UpperCAmelCase = None , UpperCAmelCase = 0.0 , UpperCAmelCase = 5_0 , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , ) -> Union[ImagePipelineOutput, Tuple]:
self.check_inputs(UpperCAmelCase )
# 2. Preprocess image
__a = preprocess(UpperCAmelCase )
# 3. set timesteps
self.scheduler.set_timesteps(UpperCAmelCase , device=self.device )
__a , __a = self.get_timesteps(UpperCAmelCase , UpperCAmelCase , self.device )
__a = timesteps[:1].repeat(UpperCAmelCase )
# 4. Prepare latent variables
__a = self.prepare_latents(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , self.unet.dtype , self.device , UpperCAmelCase )
__a = latents
# 5. Denoising loop
for t in self.progress_bar(UpperCAmelCase ):
# 1. predict noise model_output
__a = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__a = self.scheduler.step(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , eta=UpperCAmelCase , use_clipped_model_output=UpperCAmelCase , generator=UpperCAmelCase , ).prev_sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=UpperCAmelCase )
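# Pure-Python sketch of the strength-to-timesteps arithmetic in get_timesteps
# above: strength controls how much of the schedule is skipped, so with 50
# steps and strength=0.8 the loop runs the final 40 denoising steps.
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
print(t_start, num_inference_steps - t_start)  # 10 40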
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
A__ : List[str] = TextToVideoSDPipeline
A__ : Dict = TEXT_TO_IMAGE_PARAMS
A__ : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
A__ : int = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
torch.manual_seed(0 )
__a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
__a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
torch.manual_seed(0 )
__a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
__a = CLIPTextModel(UpperCAmelCase )
__a = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase=0 ) -> Optional[int]:
if str(UpperCAmelCase ).startswith('mps' ):
__a = torch.manual_seed(UpperCAmelCase )
else:
__a = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__a = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def __SCREAMING_SNAKE_CASE ( self ) -> int:
__a = 'cpu' # ensure determinism for the device-dependent torch.Generator
__a = self.get_dummy_components()
__a = TextToVideoSDPipeline(**UpperCAmelCase )
__a = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
__a = self.get_dummy_inputs(UpperCAmelCase )
__a = 'np'
__a = sd_pipe(**UpperCAmelCase ).frames
__a = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__a = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> int:
return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy' )
__a = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
__a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__a = pipe.to('cuda' )
__a = 'Spiderman is surfing'
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2_5 , output_type='pt' ).frames
__a = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy' )
__a = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b' )
__a = pipe.to('cuda' )
__a = 'Spiderman is surfing'
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=2 , output_type='pt' ).frames
__a = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
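# The determinism these tests depend on, in isolation: a CPU torch.Generator
# seeded with a fixed value reproduces the same random latents on every run.
import torch

first = torch.randn(2, 2, generator=torch.Generator(device="cpu").manual_seed(0))
second = torch.randn(2, 2, generator=torch.Generator(device="cpu").manual_seed(0))
assert torch.equal(first, second)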
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''

import torch


def main():
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
@property
def UpperCAmelCase( self : str ):
return self.get_dummy_input()
@property
def UpperCAmelCase( self : Dict ):
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def UpperCAmelCase( self : Optional[int] , lowerCamelCase_ : Any=True , lowerCamelCase_ : str=False , lowerCamelCase_ : str=False , lowerCamelCase_ : str=False , ):
a_ : Optional[int] = 4
a_ : Union[str, Any] = 3_2
a_ : Dict = (3_2, 3_2)
a_ : Tuple = torch.manual_seed(0 )
a_ : str = torch.device(lowerCamelCase_ )
a_ : Dict = (batch_size, num_channels) + sizes
a_ : Tuple = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ )
a_ : List[str] = {"""hidden_states""": hidden_states}
if include_temb:
a_ : Any = 1_2_8
a_ : Any = randn_tensor((batch_size, temb_channels) , generator=lowerCamelCase_ , device=lowerCamelCase_ )
if include_res_hidden_states_tuple:
a_ : List[Any] = torch.manual_seed(1 )
a_ : Any = (randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ ),)
if include_encoder_hidden_states:
a_ : str = floats_tensor((batch_size, 3_2, 3_2) ).to(lowerCamelCase_ )
if include_skip_sample:
a_ : Any = randn_tensor(((batch_size, 3) + sizes) , generator=lowerCamelCase_ , device=lowerCamelCase_ )
return dummy_input
def UpperCAmelCase( self : str ):
a_ : List[Any] = {
"""in_channels""": 3_2,
"""out_channels""": 3_2,
"""temb_channels""": 1_2_8,
}
if self.block_type == "up":
a_ : List[str] = 3_2
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
a_ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase( self : Dict , lowerCamelCase_ : str ):
a_ : Optional[int] = self.prepare_init_args_and_inputs_for_common()
a_ : List[str] = self.block_class(**lowerCamelCase_ )
unet_block.to(lowerCamelCase_ )
unet_block.eval()
with torch.no_grad():
a_ : Tuple = unet_block(**lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
a_ : Optional[Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
a_ : Any = output[0, -1, -3:, -3:]
a_ : int = torch.tensor(lowerCamelCase_ ).to(lowerCamelCase_ )
assert torch_all_close(output_slice.flatten() , lowerCamelCase_ , atol=5E-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def UpperCAmelCase( self : List[str] ):
a_ : Tuple = self.prepare_init_args_and_inputs_for_common()
a_ : str = self.block_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
a_ : List[str] = model(**lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
a_ : Optional[int] = output[0]
a_ : Any = torch.device(lowerCamelCase_ )
a_ : List[str] = randn_tensor(output.shape , device=lowerCamelCase_ )
a_ : Union[str, Any] = torch.nn.functional.mse_loss(lowerCamelCase_ , lowerCamelCase_ )
loss.backward()
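# The training check above reduces to this pattern: forward pass, MSE against
# a random target, backward, and gradients appear. A stand-in module keeps
# the sketch self-contained.
import torch

block = torch.nn.Conv2d(3, 3, kernel_size=3, padding=1)
block.train()
out = block(torch.randn(1, 3, 8, 8))
loss = torch.nn.functional.mse_loss(out, torch.randn_like(out))
loss.backward()
assert all(p.grad is not None for p in block.parameters())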
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) pair for `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) position in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")  # the 5x5 square merges i and j

        # Write row/column coordinates as two rows, then read them off flat.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # Invert the encode step: lay the coordinate pairs out flat, then
        # reshape them back into the two coordinate rows.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
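# Round-trip check for the cipher above. Encoding folds "j" into "i", so
# decode(encode(m)) == m holds for j-free messages.
if __name__ == "__main__":
    cipher = BifidCipher()
    ciphertext = cipher.encode("testmessage")
    assert cipher.decode(ciphertext) == "testmessage"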
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
@property
def __lowerCamelCase ( self : Optional[int] ) ->str:
return self.get_dummy_input()
@property
def __lowerCamelCase ( self : Optional[Any] ) ->Dict:
if self.block_type == "down":
return (4, 3_2, 1_6, 1_6)
elif self.block_type == "mid":
return (4, 3_2, 3_2, 3_2)
elif self.block_type == "up":
return (4, 3_2, 6_4, 6_4)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def __lowerCamelCase ( self : List[Any] , A : List[Any]=True , A : Any=False , A : Any=False , A : Optional[int]=False , ) ->List[str]:
lowerCamelCase__ : int = 4
lowerCamelCase__ : str = 3_2
lowerCamelCase__ : Optional[Any] = (3_2, 3_2)
lowerCamelCase__ : int = torch.manual_seed(0 )
lowerCamelCase__ : List[str] = torch.device(A )
lowerCamelCase__ : Optional[int] = (batch_size, num_channels) + sizes
lowerCamelCase__ : List[Any] = randn_tensor(A , generator=A , device=A )
lowerCamelCase__ : Optional[Any] = {'''hidden_states''': hidden_states}
if include_temb:
lowerCamelCase__ : Dict = 1_2_8
lowerCamelCase__ : Union[str, Any] = randn_tensor((batch_size, temb_channels) , generator=A , device=A )
if include_res_hidden_states_tuple:
lowerCamelCase__ : int = torch.manual_seed(1 )
lowerCamelCase__ : int = (randn_tensor(A , generator=A , device=A ),)
if include_encoder_hidden_states:
lowerCamelCase__ : Tuple = floats_tensor((batch_size, 3_2, 3_2) ).to(A )
if include_skip_sample:
lowerCamelCase__ : Tuple = randn_tensor(((batch_size, 3) + sizes) , generator=A , device=A )
return dummy_input
def __lowerCamelCase ( self : Any ) ->Dict:
lowerCamelCase__ : Optional[int] = {
'''in_channels''': 3_2,
'''out_channels''': 3_2,
'''temb_channels''': 1_2_8,
}
if self.block_type == "up":
lowerCamelCase__ : Dict = 3_2
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
lowerCamelCase__ : Any = self.dummy_input
return init_dict, inputs_dict
def __lowerCamelCase ( self : str , A : Dict ) ->Dict:
lowerCamelCase__ , lowerCamelCase__ : Tuple = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : int = self.block_class(**A )
unet_block.to(A )
unet_block.eval()
with torch.no_grad():
lowerCamelCase__ : Any = unet_block(**A )
if isinstance(A , A ):
lowerCamelCase__ : List[Any] = output[0]
self.assertEqual(output.shape , self.output_shape )
lowerCamelCase__ : Dict = output[0, -1, -3:, -3:]
lowerCamelCase__ : Dict = torch.tensor(A ).to(A )
assert torch_all_close(output_slice.flatten() , A , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def __lowerCamelCase ( self : Union[str, Any] ) ->Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
lowerCamelCase__ : Tuple = self.block_class(**A )
model.to(A )
model.train()
lowerCamelCase__ : int = model(**A )
if isinstance(A , A ):
lowerCamelCase__ : Optional[int] = output[0]
lowerCamelCase__ : List[Any] = torch.device(A )
lowerCamelCase__ : Optional[Any] = randn_tensor(output.shape , device=A )
lowerCamelCase__ : List[str] = torch.nn.functional.mse_loss(A , A )
loss.backward()
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
_UpperCAmelCase : Dict = ['image_processor', 'tokenizer']
_UpperCAmelCase : Tuple = 'LayoutLMv2ImageProcessor'
_UpperCAmelCase : str = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __init__( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : int=None ,__SCREAMING_SNAKE_CASE : Dict=None ,**__SCREAMING_SNAKE_CASE : Dict ):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." ,__SCREAMING_SNAKE_CASE ,)
UpperCAmelCase = kwargs.pop("feature_extractor" )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def __call__( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,__SCREAMING_SNAKE_CASE : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None ,__SCREAMING_SNAKE_CASE : Union[List[List[int]], List[List[List[int]]]] = None ,__SCREAMING_SNAKE_CASE : Optional[Union[List[int], List[List[int]]]] = None ,__SCREAMING_SNAKE_CASE : bool = True ,__SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False ,__SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None ,__SCREAMING_SNAKE_CASE : Optional[int] = None ,__SCREAMING_SNAKE_CASE : int = 0 ,__SCREAMING_SNAKE_CASE : Optional[int] = None ,__SCREAMING_SNAKE_CASE : Optional[bool] = None ,__SCREAMING_SNAKE_CASE : Optional[bool] = None ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = True ,__SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None ,**__SCREAMING_SNAKE_CASE : List[Any] ,):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
UpperCAmelCase = self.image_processor(images=__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase = features["words"]
UpperCAmelCase = self.tokenizer(
text=text if text is not None else features["words"] ,text_pair=text_pair if text_pair is not None else None ,boxes=boxes if boxes is not None else features["boxes"] ,word_labels=__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE ,padding=__SCREAMING_SNAKE_CASE ,truncation=__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,stride=__SCREAMING_SNAKE_CASE ,pad_to_multiple_of=__SCREAMING_SNAKE_CASE ,return_token_type_ids=__SCREAMING_SNAKE_CASE ,return_attention_mask=__SCREAMING_SNAKE_CASE ,return_overflowing_tokens=__SCREAMING_SNAKE_CASE ,return_special_tokens_mask=__SCREAMING_SNAKE_CASE ,return_offsets_mapping=__SCREAMING_SNAKE_CASE ,return_length=__SCREAMING_SNAKE_CASE ,verbose=__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
# add pixel values
UpperCAmelCase = features.pop("pixel_values" )
if return_overflowing_tokens is True:
UpperCAmelCase = self.get_overflowing_images(__SCREAMING_SNAKE_CASE ,encoded_inputs["overflow_to_sample_mapping"] )
UpperCAmelCase = images
return encoded_inputs
def _UpperCAmelCase ( self : Optional[int] ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Any ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
"Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
f''' {len(__SCREAMING_SNAKE_CASE )} and {len(__SCREAMING_SNAKE_CASE )}''' )
return images_with_overflow
def _UpperCAmelCase ( self : Tuple ,*__SCREAMING_SNAKE_CASE : Optional[int] ,**__SCREAMING_SNAKE_CASE : Tuple ):
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Union[str, Any] ,*__SCREAMING_SNAKE_CASE : Any ,**__SCREAMING_SNAKE_CASE : Tuple ):
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
@property
def _UpperCAmelCase ( self : List[str] ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _UpperCAmelCase ( self : Optional[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." ,__SCREAMING_SNAKE_CASE ,)
return self.image_processor_class
@property
def _UpperCAmelCase ( self : int ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." ,__SCREAMING_SNAKE_CASE ,)
return self.image_processor
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
_UpperCAmelCase : Optional[int] = ['image_processor', 'tokenizer']
_UpperCAmelCase : str = 'Pix2StructImageProcessor'
_UpperCAmelCase : Any = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : Optional[int] ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Tuple ):
UpperCAmelCase = False
super().__init__(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def __call__( self : Any ,__SCREAMING_SNAKE_CASE : Optional[Any]=None ,__SCREAMING_SNAKE_CASE : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,__SCREAMING_SNAKE_CASE : bool = True ,__SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False ,__SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = None ,__SCREAMING_SNAKE_CASE : Optional[int] = None ,__SCREAMING_SNAKE_CASE : Optional[int] = 2_0_4_8 ,__SCREAMING_SNAKE_CASE : int = 0 ,__SCREAMING_SNAKE_CASE : Optional[int] = None ,__SCREAMING_SNAKE_CASE : Optional[bool] = None ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = False ,__SCREAMING_SNAKE_CASE : bool = True ,__SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None ,**__SCREAMING_SNAKE_CASE : Union[str, Any] ,):
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCAmelCase = self.tokenizer
UpperCAmelCase = self.tokenizer(
text=__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE ,padding=__SCREAMING_SNAKE_CASE ,truncation=__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,stride=__SCREAMING_SNAKE_CASE ,pad_to_multiple_of=__SCREAMING_SNAKE_CASE ,return_attention_mask=__SCREAMING_SNAKE_CASE ,return_overflowing_tokens=__SCREAMING_SNAKE_CASE ,return_special_tokens_mask=__SCREAMING_SNAKE_CASE ,return_offsets_mapping=__SCREAMING_SNAKE_CASE ,return_token_type_ids=__SCREAMING_SNAKE_CASE ,return_length=__SCREAMING_SNAKE_CASE ,verbose=__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,max_patches=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
else:
# add pixel_values and bbox
UpperCAmelCase = self.image_processor(
__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,max_patches=__SCREAMING_SNAKE_CASE ,header_text=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
if text is not None and not self.image_processor.is_vqa:
UpperCAmelCase = self.tokenizer(
text=__SCREAMING_SNAKE_CASE ,add_special_tokens=__SCREAMING_SNAKE_CASE ,padding=__SCREAMING_SNAKE_CASE ,truncation=__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,stride=__SCREAMING_SNAKE_CASE ,pad_to_multiple_of=__SCREAMING_SNAKE_CASE ,return_attention_mask=__SCREAMING_SNAKE_CASE ,return_overflowing_tokens=__SCREAMING_SNAKE_CASE ,return_special_tokens_mask=__SCREAMING_SNAKE_CASE ,return_offsets_mapping=__SCREAMING_SNAKE_CASE ,return_token_type_ids=__SCREAMING_SNAKE_CASE ,return_length=__SCREAMING_SNAKE_CASE ,verbose=__SCREAMING_SNAKE_CASE ,return_tensors=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ,)
if "attention_mask" in text_encoding:
UpperCAmelCase = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
UpperCAmelCase = text_encoding.pop("input_ids" )
else:
UpperCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(__SCREAMING_SNAKE_CASE )
return encoding_image_processor
def _UpperCAmelCase ( self : Union[str, Any] ,*__SCREAMING_SNAKE_CASE : Union[str, Any] ,**__SCREAMING_SNAKE_CASE : str ):
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[str] ,*__SCREAMING_SNAKE_CASE : Optional[int] ,**__SCREAMING_SNAKE_CASE : Dict ):
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
@property
def _UpperCAmelCase ( self : List[Any] ):
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
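# Hypothetical usage sketch (the checkpoint is a real DocVQA repository on
# the Hub; "invoice.png" is a placeholder path). For a VQA-style image
# processor the question is rendered into the patches as header text, so the
# call returns flattened_patches plus an attention mask rather than separate
# input_ids.
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-docvqa-base")
image = Image.open("invoice.png").convert("RGB")
inputs = processor(images=image, text="What is the total?", return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, flattened_patches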
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
    if to_sanitized not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
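# A few conversions with the function above: each metric prefix step is a
# factor of 10**3, applied as a signed power of ten.
assert length_conversion(1, "meter", "kilometer") == 0.001
assert length_conversion(1, "kilometer", "meter") == 1000.0
assert length_conversion(4, "petametre", "terametre") == 4000.0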
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a__ : Any = 16
a__ : str = 32
def UpperCAmelCase_ ( _UpperCAmelCase :Accelerator , _UpperCAmelCase :int = 16 ) -> int:
'''simple docstring'''
A_ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
A_ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_UpperCAmelCase :Optional[Any] ):
# max_length=None => use the model max length (it's actually the default)
A_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_UpperCAmelCase :List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ = 16
elif accelerator.mixed_precision != "no":
A_ = 8
else:
A_ = None
return tokenizer.pad(
_UpperCAmelCase , padding='''longest''' , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
A_ = DataLoader(
tokenized_datasets['''train'''] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
A_ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a__ : Optional[Any] = mocked_dataloaders # noqa: F811
def UpperCAmelCase_ ( _UpperCAmelCase :List[str] , _UpperCAmelCase :Dict ) -> Dict:
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _UpperCAmelCase ) == "1":
A_ = 2
# New Code #
A_ = int(args.gradient_accumulation_steps )
A_ = int(args.local_sgd_steps )
# Initialize accelerator
A_ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ = config['''lr''']
A_ = int(config['''num_epochs'''] )
A_ = int(config['''seed'''] )
A_ = int(config['''batch_size'''] )
A_ = evaluate.load('''glue''' , '''mrpc''' )
set_seed(_UpperCAmelCase )
A_ , A_ = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ = model.to(accelerator.device )
# Instantiate optimizer
A_ = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
# Instantiate scheduler
A_ = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
with LocalSGD(
accelerator=_UpperCAmelCase , model=_UpperCAmelCase , local_sgd_steps=_UpperCAmelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCAmelCase ):
A_ = model(**_UpperCAmelCase )
A_ = output.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A_ = model(**_UpperCAmelCase )
A_ = outputs.logits.argmax(dim=-1 )
A_ , A_ = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
A_ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , _UpperCAmelCase )
def UpperCAmelCase_ ( ) -> Union[str, Any]:
'''simple docstring'''
A_ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_UpperCAmelCase , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=_UpperCAmelCase , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
A_ = parser.parse_args()
A_ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
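# The idea behind LocalSGD, sketched in a single process: every worker takes
# local_sgd_steps optimizer steps on its own, then all workers average their
# parameters (instead of all-reducing gradients on every step). Two modules
# stand in here for two distributed workers.
import torch


def average_parameters(workers):
    with torch.no_grad():
        for params in zip(*(w.parameters() for w in workers)):
            mean = torch.stack([p.data for p in params]).mean(dim=0)
            for p in params:
                p.data.copy_(mean)


worker_a, worker_b = torch.nn.Linear(4, 2), torch.nn.Linear(4, 2)
average_parameters([worker_a, worker_b])
assert torch.equal(worker_a.weight, worker_b.weight)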
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Gaussian Error Linear Unit, exact erf form."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (the variant popularized by GPT-2)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with the output clipped to [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split in two along `axis`, gate one half with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
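# Quick check of the lookup above: resolve an activation by name and apply it
# to a small tensor.
if __name__ == "__main__":
    values = tf.constant([-1.0, 0.0, 1.0])
    print(get_tf_activation("gelu")(values).numpy())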
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
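# A quick illustration (not part of the test suite) of the chained-URL syntax
# exercised by `test_fs_isfile` above; the archive path here is hypothetical:
#
#   fs, *_ = fsspec.get_fs_token_paths("zip://dataset.jsonl::./archive.zip")
#   fs.isfile("dataset.jsonl")  # True if the archive contains that member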
| 332 | 1 |
import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")

    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
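# A minimal usage sketch of the context manager above (illustrative only;
# `fn`, `items` and `map_single` are placeholder names, and the "spark"
# backend additionally requires the joblibspark package):
#
#   with parallel_backend("spark"):
#       mapped = parallel_map(fn, items, num_proc=8, types=None,
#                             disable_tqdm=True, desc=None,
#                             single_map_nested_func=map_single)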
| 294 |
import argparse
import collections
import os
import re
import tempfile

import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("""pretraining""", """MODEL_FOR_PRETRAINING_MAPPING_NAMES""", """AutoModelForPreTraining"""),
("""feature-extraction""", """MODEL_MAPPING_NAMES""", """AutoModel"""),
("""audio-classification""", """MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForAudioClassification"""),
("""text-generation""", """MODEL_FOR_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForCausalLM"""),
("""automatic-speech-recognition""", """MODEL_FOR_CTC_MAPPING_NAMES""", """AutoModelForCTC"""),
("""image-classification""", """MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForImageClassification"""),
("""image-segmentation""", """MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES""", """AutoModelForImageSegmentation"""),
("""fill-mask""", """MODEL_FOR_MASKED_LM_MAPPING_NAMES""", """AutoModelForMaskedLM"""),
("""object-detection""", """MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES""", """AutoModelForObjectDetection"""),
(
"""zero-shot-object-detection""",
"""MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES""",
"""AutoModelForZeroShotObjectDetection""",
),
("""question-answering""", """MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES""", """AutoModelForQuestionAnswering"""),
("""text2text-generation""", """MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES""", """AutoModelForSeq2SeqLM"""),
("""text-classification""", """MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForSequenceClassification"""),
("""automatic-speech-recognition""", """MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES""", """AutoModelForSpeechSeq2Seq"""),
(
"""table-question-answering""",
"""MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForTableQuestionAnswering""",
),
("""token-classification""", """MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForTokenClassification"""),
("""multiple-choice""", """MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES""", """AutoModelForMultipleChoice"""),
(
"""next-sentence-prediction""",
"""MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES""",
"""AutoModelForNextSentencePrediction""",
),
(
"""audio-frame-classification""",
"""MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForAudioFrameClassification""",
),
("""audio-xvector""", """MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES""", """AutoModelForAudioXVector"""),
(
"""document-question-answering""",
"""MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForDocumentQuestionAnswering""",
),
(
"""visual-question-answering""",
"""MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES""",
"""AutoModelForVisualQuestionAnswering""",
),
    ("""image-to-text""", """MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES""", """AutoModelForVision2Seq"""),
(
"""zero-shot-image-classification""",
"""MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES""",
"""AutoModelForZeroShotImageClassification""",
),
("""depth-estimation""", """MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES""", """AutoModelForDepthEstimation"""),
("""video-classification""", """MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES""", """AutoModelForVideoClassification"""),
("""mask-generation""", """MODEL_FOR_MASK_GENERATION_MAPPING_NAMES""", """AutoModelForMaskGeneration"""),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
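# For instance (illustrative): camel_case_split("TFBertModel") returns
# ["TF", "Bert", "Model"], which lets get_frameworks_table() below strip the
# trailing word of a model prefix one step at a time.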
def get_frameworks_table():
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF{auto_class}", f"Flax{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
| 528 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n   title={Evaluating Large Language Models Trained on Code},\n   author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n   year={2021},\n   eprint={2107.03374},\n   archivePrefix={arXiv},\n   primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
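# Sanity check (illustrative, with made-up numbers) for the unbiased estimator
# defined below: the numerically stable product form equals the direct binomial
# expression 1 - C(n - c, k) / C(n, k). For n=10 samples with c=3 passing and k=5:
#
#   from math import comb
#   n, c, k = 10, 3, 5
#   direct = 1.0 - comb(n - c, k) / comb(n, k)
#   stable = 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
#   assert abs(direct - stable) < 1e-12  # both equal 11/12 ≈ 0.9167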
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
| 707 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with fixed languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """Feature for translations with variable languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 97 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 275 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint (the exact exception type is assumed here)
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # PyTorch only in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 655 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
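# For illustration (not part of the module): the `attribute_map` above lets the
# standard config names alias DistilBERT's own, e.g.
#
#   config = DistilBertConfig()
#   assert config.hidden_size == config.dim == 768
#   assert config.num_hidden_layers == config.n_layers == 6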
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 402 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 402 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000

        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )

        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 342 |
import enum
import shutil
import sys

TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
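# A small usage sketch (illustrative; assumes a VT100-compatible terminal):
#
#   forceWrite("Loading", end="\n")
#   move_cursor(1, "UP")   # jump back onto the line just written
#   clear_line()           # blank it out and return to column 0
#   forceWrite("Done")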
| 342 | 1 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def _lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any]=False ) -> List[Any]:
'''simple docstring'''
try:
__A : List[Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__A : Optional[int] = default
else:
# KEY is set, convert it to True or False.
try:
__A : Tuple = strtobool(_SCREAMING_SNAKE_CASE )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
lowerCamelCase : Union[str, Any] =parse_flag_from_env('''RUN_SLOW''', default=False)
def _lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return unittest.skip('Test was skipped' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Tuple ) -> str:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : str ) -> int:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : List[str] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : Dict=None ) -> List[str]:
'''simple docstring'''
if test_case is None:
return partial(_SCREAMING_SNAKE_CASE , version=_SCREAMING_SNAKE_CASE )
return unittest.skipUnless(is_torch_version('>=' , _SCREAMING_SNAKE_CASE ) , F'test requires torch version >= {version}' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Tuple ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : int ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : List[str] ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] =(
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def _lowercase ( _SCREAMING_SNAKE_CASE : Dict ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_SCREAMING_SNAKE_CASE )
class __snake_case( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = True
@classmethod
def _a ( cls ):
'''simple docstring'''
__A : Dict = tempfile.mkdtemp()
@classmethod
def _a ( cls ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _a ( self ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(__lowerCamelCase )
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def _a ( self ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __snake_case( unittest.TestCase ):
'''simple docstring'''
def _a ( self , __lowerCamelCase ):
'''simple docstring'''
__A : Optional[Any] = mocks if isinstance(__lowerCamelCase , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def _lowercase ( _SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
'''simple docstring'''
__A : List[Any] = AcceleratorState()
__A : Dict = tensor[None].clone().to(state.device )
__A : Dict = gather(_SCREAMING_SNAKE_CASE ).cpu()
__A : Optional[int] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _SCREAMING_SNAKE_CASE ):
return False
return True
class __snake_case:
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
__A : List[Any] = returncode
__A : Union[str, Any] = stdout
__A : int = stderr
async def _lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
while True:
__A : Any = await stream.readline()
if line:
callback(_SCREAMING_SNAKE_CASE )
else:
break
async def _lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=False , _SCREAMING_SNAKE_CASE : str=False ) -> _RunOutput:
'''simple docstring'''
if echo:
print('\nRunning: ' , ' '.join(_SCREAMING_SNAKE_CASE ) )
__A : Tuple = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_SCREAMING_SNAKE_CASE , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_SCREAMING_SNAKE_CASE , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__A : List[str] = []
__A : str = []
def tee(_SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any]="" ):
__A : Any = line.decode('utf-8' ).rstrip()
sink.append(_SCREAMING_SNAKE_CASE )
if not quiet:
print(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , file=_SCREAMING_SNAKE_CASE )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _SCREAMING_SNAKE_CASE : tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _SCREAMING_SNAKE_CASE : tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , sys.stderr , label='stderr:' ) ) ),
] , timeout=_SCREAMING_SNAKE_CASE , )
return _RunOutput(await p.wait() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Union[str, Any]=180 , _SCREAMING_SNAKE_CASE : Optional[Any]=False , _SCREAMING_SNAKE_CASE : List[Any]=True ) -> _RunOutput:
'''simple docstring'''
__A : List[Any] = asyncio.get_event_loop()
__A : List[str] = loop.run_until_complete(
_stream_subprocess(_SCREAMING_SNAKE_CASE , env=_SCREAMING_SNAKE_CASE , stdin=_SCREAMING_SNAKE_CASE , timeout=_SCREAMING_SNAKE_CASE , quiet=_SCREAMING_SNAKE_CASE , echo=_SCREAMING_SNAKE_CASE ) )
__A : Any = ' '.join(_SCREAMING_SNAKE_CASE )
if result.returncode > 0:
__A : List[str] = '\n'.join(result.stderr )
raise RuntimeError(
F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
F'The combined stderr from workers follows:\n{stderr}' )
return result
class __snake_case( A_ ):
'''simple docstring'''
pass
def _lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any=False ) -> Any:
'''simple docstring'''
try:
__A : str = subprocess.check_output(_SCREAMING_SNAKE_CASE , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_SCREAMING_SNAKE_CASE , 'decode' ):
__A : Optional[int] = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F'Command `{" ".join(_SCREAMING_SNAKE_CASE )}` failed with the following error:\n\n{e.output.decode()}' ) from e
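# A minimal usage sketch (illustrative command): `run_command` returns the raw
# output, while `execute_subprocess_async` streams it and collects decoded lines:
#
#   out = run_command(["python", "-c", "print('hello')"], return_stdout=True)
#   result = execute_subprocess_async(["python", "-c", "print('hello')"])
#   # result.stdout is a list of decoded lines, e.g. ["hello"]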
| 709 |
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Encode a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
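# Note: the round trip is case-insensitive, e.g. decrypt(encrypt("Sos")) == "SOS",
# since encrypt() upper-cases the message before the table lookup.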
| 237 | 0 |
""" Testing suite for the PyTorch MobileViT model. """
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
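    # Shape walk-through (hand-computed, assuming illustrative tester values of
    # image_size=32 and output_stride=32): the five hidden states have spatial
    # sizes 16, 8, 4, 2 and 1, the divisor ends the loop at 64, and 64 // 2
    # recovers output_stride == 32, which is what the final assertion checks.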
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 422 |
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
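# Minimal usage sketch (illustrative input, hand-checked): any longest
# increasing subsequence of this array has length 6; one valid answer is
# [10, 22, 33, 41, 60, 80], but ties mean the exact result depends on
# traversal order, so only the length is asserted here.
example_result = longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
assert len(example_result) == 6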
if __name__ == "__main__":
import doctest
doctest.testmod() | 249 | 0 |
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 355 |
def factorial(num: int) -> int:
    """Find the factorial of the given number num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split the digits of number and add them together."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (100! by default)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
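    # Worked example (hand-checked): 10! = 3628800 and
    # 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) must return 27.
    assert solution(10) == 27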
| 355 | 1 |
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)

        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
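# A minimal end-to-end sketch of the pipeline exercised above (checkpoint and
# prompt are illustrative; fp16 on CUDA is an assumption for speed, not a
# requirement):
#
#     import torch
#     from diffusers import AltDiffusionPipeline
#
#     pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", torch_dtype=torch.float16).to("cuda")
#     image = pipe("A painting of a squirrel eating a burger").images[0]
#     image.save("squirrel.png")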
| 71 | from __future__ import annotations
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 666 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
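# How the lazy pattern behaves in practice (a sketch, assuming the package is
# reachable as transformers.models.cpmant): _LazyModule defers the submodule
# imports until attribute access, so importing the package itself stays cheap.
#
#     from transformers.models import cpmant
#     config_cls = cpmant.CpmAntConfig  # configuration_cpmant is imported here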
| 718 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 513 | 0 |
import os
from pathlib import Path

import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset


BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]

            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.prod(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
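# Intuition for the sortish-sampler check above (illustrative numbers, not
# measured): batching length-sorted examples groups similar lengths together,
# so a batch of lengths [9, 10] pads 1 token, while an unsorted batch of
# lengths [2, 10] pads 8 -- hence the strict "<" on total pad-token counts.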
| 188 |
from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
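    # Quick hand-checked examples (assumed inputs): merging [1, 3] and [2]
    # gives [1, 2, 3] with median 2; merging [1, 2] and [3, 4] gives an even
    # count, so the middle pair averages to (2 + 3) / 2 == 2.5.
    assert median_of_two_arrays([1, 3], [2]) == 2
    assert median_of_two_arrays([1, 2], [3, 4]) == 2.5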
| 188 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 717 |
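# A small instantiation sketch (the values are assumptions for illustration,
# not a released checkpoint config): out_features selects which stages a
# backbone consumer receives, and stage_names is derived from len(depths).
#
#     config = BitConfig(depths=[2, 2], hidden_sizes=[128, 256], out_features=["stage2"])
#     assert config.stage_names == ["stem", "stage1", "stage2"]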
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
| 403 | 0 |
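# Typical invocation (script and flag names after the launcher are
# illustrative): run an 8-core TPU job where train_glue.py defines the
# `_mp_fn(index)` entry point that xmp.spawn expects.
#
#     python xla_spawn.py --num_cores 8 train_glue.py --model_name_or_path bert-base-cased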
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
#
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper

import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname

import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary

from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

json_indent = 2

# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
    # fairseq:
    "wmt19-ru-en": {"length_penalty": 1.1},
    "wmt19-en-ru": {"length_penalty": 1.15},
    "wmt19-en-de": {"length_penalty": 1.0},
    "wmt19-de-en": {"length_penalty": 1.1},
    # allenai:
    "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
    "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
    "wmt16-en-de-12-1": {"length_penalty": 0.8},
    "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
    "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}

# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
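# Behavior sketch (hand-checked on a toy vocab that includes all four special
# tokens, which the function deletes and re-inserts unconditionally):
# word-internal "@@" markers are dropped and word-final tokens gain "</w>".
#
#     rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#     == {"le": 5, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}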
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
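# Example invocation (paths are illustrative; the checkpoint must sit next to
# the fairseq dicts and bpecodes, as in an unpacked wmt19 download):
#
#     python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#         --fsmt_checkpoint_path data/wmt19.ru-en.ensemble/model4.pt \
#         --pytorch_dump_folder_path data/wmt19-ru-en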
| 12 |
"""Convert Bark checkpoint."""
import argparse
import os
from pathlib import Path

import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download

from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
    BarkCoarseConfig,
    BarkConfig,
    BarkFineConfig,
    BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)


new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")


def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)


def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model


def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
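# Example invocation (script name and output path are illustrative):
# convert the small text sub-model, pulling the suno/bark checkpoint into the
# local cache on first run.
#
#     python convert_suno_to_hf.py text bark-text-small --is_small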
| 375 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : List[Any] = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class lowerCAmelCase__ ( __A ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "gpt_neox"
def __init__( self : Optional[int] , lowercase__ : Dict=5_0_4_3_2 , lowercase__ : Tuple=6_1_4_4 , lowercase__ : Optional[int]=4_4 , lowercase__ : Optional[Any]=6_4 , lowercase__ : int=2_4_5_7_6 , lowercase__ : int="gelu" , lowercase__ : Optional[int]=0.2_5 , lowercase__ : List[str]=1_0_0_0_0 , lowercase__ : Dict=0.0 , lowercase__ : Dict=0.0 , lowercase__ : Dict=0.1 , lowercase__ : Optional[Any]=2_0_4_8 , lowercase__ : int=0.0_2 , lowercase__ : str=1e-5 , lowercase__ : Union[str, Any]=True , lowercase__ : List[Any]=0 , lowercase__ : Union[str, Any]=2 , lowercase__ : Optional[Any]=False , lowercase__ : Optional[int]=True , lowercase__ : Any=None , **lowercase__ : int , ):
super().__init__(bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
__lowercase : Optional[int] = vocab_size
__lowercase : Tuple = max_position_embeddings
__lowercase : Optional[Any] = hidden_size
__lowercase : Any = num_hidden_layers
__lowercase : Optional[Any] = num_attention_heads
__lowercase : List[str] = intermediate_size
__lowercase : Tuple = hidden_act
__lowercase : Optional[Any] = rotary_pct
__lowercase : int = rotary_emb_base
__lowercase : Optional[Any] = attention_dropout
__lowercase : List[Any] = hidden_dropout
__lowercase : int = classifier_dropout
__lowercase : Dict = initializer_range
__lowercase : Optional[Any] = layer_norm_eps
__lowercase : Optional[Any] = use_cache
__lowercase : int = tie_word_embeddings
__lowercase : Any = use_parallel_residual
__lowercase : Optional[int] = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def snake_case ( self : Optional[int] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
f'got {self.rope_scaling}' )
__lowercase : Union[str, Any] = self.rope_scaling.get("type" , lowercase__ )
__lowercase : List[Any] = self.rope_scaling.get("factor" , lowercase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(lowercase__ , lowercase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
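# Usage sketch (the values are illustrative, not a released checkpoint):
# linear RoPE scaling stretches position indices by `factor`, extending the
# usable context past the pretraining length; this passes the validation above.
#
#     config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})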
| 704 |
"""simple docstring"""
def snake_case__ ( _lowerCamelCase ) ->int:
"""simple docstring"""
if a < 0:
raise ValueError("Input value must be a positive integer" )
elif isinstance(_lowerCamelCase, _lowerCamelCase ):
raise TypeError("Input value must be a 'int' type" )
return bin(_lowerCamelCase ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
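    # Hand-checked examples: 25 == 0b11001 has three set bits; 0 has none.
    assert get_set_bits_count(25) == 3
    assert get_set_bits_count(0) == 0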
| 281 | 0 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class a ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __A ( self ) -> int:
_UpperCAmelCase = SMALL_MODEL_IDENTIFIER
_UpperCAmelCase = "pt"
_UpperCAmelCase = "tf"
def __A ( self , snake_case_ ) -> Optional[int]:
_UpperCAmelCase = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(snake_case_ )
def __A ( self , snake_case_ ) -> Optional[Any]:
_UpperCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=snake_case_ )
model_tf.save_pretrained(snake_case_ )
def __A ( self ) -> Any:
_UpperCAmelCase = "mock_framework"
# Framework provided - return whatever the user provides
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case_ )
_UpperCAmelCase = FeaturesManager.determine_framework(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case_ )
_UpperCAmelCase = FeaturesManager.determine_framework(snake_case_ , snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def __A ( self ) -> List[Any]:
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(snake_case_ )
_UpperCAmelCase = FeaturesManager.determine_framework(snake_case_ )
self.assertEqual(snake_case_ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(snake_case_ )
_UpperCAmelCase = FeaturesManager.determine_framework(snake_case_ )
self.assertEqual(snake_case_ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(snake_case_ ):
_UpperCAmelCase = FeaturesManager.determine_framework(snake_case_ )
def __A ( self ) -> Optional[int]:
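        # TensorFlow not in environment -> use PyTorch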
_UpperCAmelCase = MagicMock(return_value=snake_case_ )
with patch("transformers.onnx.features.is_tf_available" , snake_case_ ):
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case_ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_UpperCAmelCase = MagicMock(return_value=snake_case_ )
with patch("transformers.onnx.features.is_torch_available" , snake_case_ ):
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case_ , self.framework_tf )
# Both in environment -> use PyTorch
_UpperCAmelCase = MagicMock(return_value=snake_case_ )
_UpperCAmelCase = MagicMock(return_value=snake_case_ )
with patch("transformers.onnx.features.is_tf_available" , snake_case_ ), patch(
"transformers.onnx.features.is_torch_available" , snake_case_ ):
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(snake_case_ , self.framework_pt )
# Both not in environment -> raise error
_UpperCAmelCase = MagicMock(return_value=snake_case_ )
_UpperCAmelCase = MagicMock(return_value=snake_case_ )
with patch("transformers.onnx.features.is_tf_available" , snake_case_ ), patch(
"transformers.onnx.features.is_torch_available" , snake_case_ ):
with self.assertRaises(snake_case_ ):
_UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
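# A minimal sketch of the resolution order these tests encode: an explicit
# framework argument wins, then the checkpoint's saved format, then whichever
# backend is importable (PyTorch preferred). Illustrative only -- not the
# actual FeaturesManager implementation.
def resolve_framework(explicit=None, saved_format=None, torch_ok=True, tf_ok=True):
    if explicit is not None:
        return explicit
    if saved_format is not None:
        return saved_format
    if torch_ok:
        return "pt"
    if tf_ok:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow is available.")


assert resolve_framework(explicit="tf") == "tf"
assert resolve_framework(saved_format="pt", torch_ok=False) == "pt"
assert resolve_framework(torch_ok=False) == "tf"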
| 426 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
def A__ ( A__ ) -> List[str]:
'''simple docstring'''
if "resnet-50" in model_name:
_UpperCAmelCase = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
_UpperCAmelCase = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
_UpperCAmelCase = DetrConfig(use_timm_backbone=A__ , backbone_config=A__ )
# set label attributes
_UpperCAmelCase = "panoptic" in model_name
if is_panoptic:
_UpperCAmelCase = 250
else:
_UpperCAmelCase = 91
_UpperCAmelCase = "huggingface/label-files"
_UpperCAmelCase = "coco-detection-id2label.json"
_UpperCAmelCase = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
    _UpperCAmelCase = {int(k ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config, is_panoptic
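# Hedged sketch of the label-map loading pattern used above: download the JSON
# mapping from the `huggingface/label-files` dataset repo and coerce its string
# keys to int. Requires `huggingface_hub`; the helper name is illustrative.
import json

from huggingface_hub import hf_hub_download


def load_id2label(filename="coco-detection-id2label.json"):
    path = hf_hub_download("huggingface/label-files", filename, repo_type="dataset")
    with open(path, "r") as f:
        raw = json.load(f)
    return {int(k): v for k, v in raw.items()}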
def A__ ( A__ ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
F"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
F"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def A__ ( A__ , A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = state_dict.pop(A__ )
_UpperCAmelCase = val
def A__ ( A__ , A__=False ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ""
if is_panoptic:
_UpperCAmelCase = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_UpperCAmelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:256, :]
_UpperCAmelCase = in_proj_bias[:256]
_UpperCAmelCase = in_proj_weight[256:512, :]
_UpperCAmelCase = in_proj_bias[256:512]
_UpperCAmelCase = in_proj_weight[-256:, :]
_UpperCAmelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_UpperCAmelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
_UpperCAmelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:256, :]
_UpperCAmelCase = in_proj_bias[:256]
_UpperCAmelCase = in_proj_weight[256:512, :]
_UpperCAmelCase = in_proj_bias[256:512]
_UpperCAmelCase = in_proj_weight[-256:, :]
_UpperCAmelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_UpperCAmelCase = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
_UpperCAmelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_UpperCAmelCase = in_proj_weight_cross_attn[:256, :]
_UpperCAmelCase = in_proj_bias_cross_attn[:256]
_UpperCAmelCase = in_proj_weight_cross_attn[256:512, :]
_UpperCAmelCase = in_proj_bias_cross_attn[256:512]
_UpperCAmelCase = in_proj_weight_cross_attn[-256:, :]
_UpperCAmelCase = in_proj_bias_cross_attn[-256:]
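# Hedged sketch of the q/k/v split performed above: torch.nn.MultiheadAttention
# stores one fused (3*d, d) in_proj_weight, while the HF DETR layers keep
# separate (d, d) query/key/value projections, so the fused tensor is sliced
# into thirds (d = 256 for DETR).
import torch

d = 256
fused = torch.randn(3 * d, d)
q_w, k_w, v_w = fused[:d, :], fused[d : 2 * d, :], fused[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)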
def A__ ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
@torch.no_grad()
def A__ ( A__ , A__=None , A__=False ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = get_detr_config(A__ )
# load original model from torch hub
_UpperCAmelCase = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(F"""Converting model {model_name}...""" )
_UpperCAmelCase = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=A__ ).eval()
_UpperCAmelCase = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(A__ ):
if is_panoptic:
_UpperCAmelCase = "detr." + src
rename_key(A__ , A__ , A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ , is_panoptic=A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_UpperCAmelCase = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
_UpperCAmelCase = state_dict.pop(A__ )
_UpperCAmelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_UpperCAmelCase = state_dict.pop(A__ )
_UpperCAmelCase = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
_UpperCAmelCase = state_dict.pop(A__ )
_UpperCAmelCase = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
_UpperCAmelCase = state_dict.pop(A__ )
_UpperCAmelCase = val
# finally, create HuggingFace model and load state dict
_UpperCAmelCase = DetrForSegmentation(A__ ) if is_panoptic else DetrForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
# verify our conversion on an image
_UpperCAmelCase = "coco_panoptic" if is_panoptic else "coco_detection"
_UpperCAmelCase = DetrImageProcessor(format=A__ )
_UpperCAmelCase = processor(images=prepare_img() , return_tensors="pt" )
_UpperCAmelCase = encoding["pixel_values"]
_UpperCAmelCase = detr(A__ )
_UpperCAmelCase = model(A__ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
processor.save_pretrained(A__ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(F"""nielsr/{model_name}""" )
processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
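# Hedged usage note: the converter can also be driven directly from Python
# instead of the CLI; the output path below is illustrative.
#
#     convert_detr_checkpoint("detr-resnet-50", "./detr-resnet-50-hf")
#
# This downloads the torch-hub weights, renames and splits them, verifies the
# outputs against the original model, and saves the HF-format checkpoint.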
| 426 | 1 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
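# Hedged, generic sketch of the optional-backend guard used in this __init__:
# importlib.util.find_spec checks availability without importing the package,
# so backend-specific models are only pulled in when their framework exists.
import importlib.util


def backend_available(pkg: str) -> bool:
    return importlib.util.find_spec(pkg) is not None


if backend_available("torch"):
    pass  # torch-backed model classes would be imported here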
| 709 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( PipelineTesterMixin, unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = DanceDiffusionPipeline
lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowerCamelCase = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
lowerCamelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowerCamelCase = False
lowerCamelCase = False
def UpperCAmelCase_ ( self ) -> List[Any]:
torch.manual_seed(0 )
A_ : Tuple = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_lowerCamelCase , use_timestep_embedding=_lowerCamelCase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
A_ : Optional[int] = IPNDMScheduler()
A_ : Optional[int] = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase=0 ) -> List[str]:
if str(_lowerCamelCase ).startswith("""mps""" ):
A_ : Union[str, Any] = torch.manual_seed(_lowerCamelCase )
else:
A_ : int = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
A_ : Any = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
A_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Any = self.get_dummy_components()
A_ : Any = DanceDiffusionPipeline(**_lowerCamelCase )
A_ : List[Any] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Tuple = self.get_dummy_inputs(_lowerCamelCase )
A_ : Union[str, Any] = pipe(**_lowerCamelCase )
A_ : Union[str, Any] = output.audios
A_ : Any = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
A_ : str = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase_ ( self ) -> Tuple:
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase_ ( self ) -> List[Any]:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCAmelCase_ ( self ) -> Optional[Any]:
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase_ ( self ) -> Optional[int]:
return super().test_attention_slicing_forward_pass()
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self ) -> Optional[Any]:
A_ : Optional[Any] = torch_device
A_ : str = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
A_ : int = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Optional[Any] = torch.manual_seed(0 )
A_ : Dict = pipe(generator=_lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
A_ : Any = output.audios
A_ : str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A_ : List[str] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ) -> str:
A_ : Union[str, Any] = torch_device
A_ : List[str] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
A_ : Dict = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
A_ : Dict = torch.manual_seed(0 )
A_ : Tuple = pipe(generator=_lowerCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
A_ : Dict = output.audios
A_ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
A_ : Union[str, Any] = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
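# Hedged end-to-end sketch of the pipeline exercised by the slow tests above
# (requires `diffusers` and the harmonai/maestro-150k weights; GPU optional):
import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
generator = torch.manual_seed(0)
audio = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096).audios[0]
# `audio` is a (channels, samples) NumPy array ready to be written out as WAV.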
| 385 | 0 |