code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowercase_ ( unittest.TestCase ):
def __a ( self ):
UpperCamelCase__ = 0
@slow
def __a ( self ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCamelCase__ = AutoTokenizer.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(a ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCamelCase__ = AutoTokenizer.from_pretrained(a )
self.assertIsNotNone(a )
self.assertIsInstance(a , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(a ) , 0 )
def __a ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained(a )
self.assertIsInstance(a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __a ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained(a )
self.assertIsInstance(a , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def __a ( self ):
UpperCamelCase__ = AutoConfig.from_pretrained(a )
self.assertIsInstance(a , a )
# Check that tokenizer_type ≠ model_type
UpperCamelCase__ = AutoTokenizer.from_pretrained(a , config=a )
self.assertIsInstance(a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def __a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(a , "vocab.txt" ) )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a , tokenizer_type="bert" , use_fast=a )
self.assertIsInstance(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(a , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(a , "merges.txt" ) )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a , tokenizer_type="gpt2" , use_fast=a )
self.assertIsInstance(a , a )
@require_tokenizers
def __a ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(a , "vocab.txt" ) )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a , tokenizer_type="bert" )
self.assertIsInstance(a , a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(a , "vocab.json" ) )
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(a , "merges.txt" ) )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a , tokenizer_type="gpt2" )
self.assertIsInstance(a , a )
def __a ( self ):
with pytest.raises(a ):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx" )
@require_tokenizers
def __a ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCamelCase__ = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(a , (BertTokenizer, BertTokenizerFast) )
if isinstance(a , a ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , a )
else:
self.assertEqual(tokenizer.do_lower_case , a )
self.assertEqual(tokenizer.model_max_length , 5_12 )
@require_tokenizers
def __a ( self ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
a , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
UpperCamelCase__ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def __a ( self ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCamelCase__ = TOKENIZER_MAPPING.values()
UpperCamelCase__ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(a )
@require_tokenizers
def __a ( self ):
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=a ) , a )
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" ) , a )
@require_tokenizers
def __a ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=a )
UpperCamelCase__ = "Hello, world. How are you?"
UpperCamelCase__ = tokenizer.tokenize(a )
self.assertEqual("[UNK]" , tokens[0] )
UpperCamelCase__ = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=a )
UpperCamelCase__ = tokenizer.tokenize(a )
self.assertEqual("[UNK]" , tokens[0] )
@require_tokenizers
def __a ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(a ) , a )
self.assertEqual(tokenizer.model_max_length , 5_12 )
self.assertEqual(tokenizer.vocab_size , 3_00_00 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def __a ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained(a )
self.assertIsInstance(a , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a )
self.assertIsInstance(a , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def __a ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(a , a )
def __a ( self ):
# Check we can load the tokenizer config of an online model.
UpperCamelCase__ = get_tokenizer_config("bert-base-cased" )
UpperCamelCase__ = config.pop("_commit_hash" , a )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(a , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCamelCase__ = get_tokenizer_config(a )
self.assertDictEqual(a , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCamelCase__ = AutoTokenizer.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a )
UpperCamelCase__ = get_tokenizer_config(a )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def __a ( self ):
try:
AutoConfig.register("custom" , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoTokenizer.register(a , slow_tokenizer_class=a )
UpperCamelCase__ = CustomTokenizer.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __a ( self ):
try:
AutoConfig.register("custom" , a )
# Can register in two steps
AutoTokenizer.register(a , slow_tokenizer_class=a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(a , fast_tokenizer_class=a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
a , slow_tokenizer_class=a , fast_tokenizer_class=a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoTokenizer.register(a , fast_tokenizer_class=a )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = BertTokenizerFast.from_pretrained(a )
bert_tokenizer.save_pretrained(a )
UpperCamelCase__ = CustomTokenizerFast.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a )
self.assertIsInstance(a , a )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a , use_fast=a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __a ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
UpperCamelCase__ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=a )
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=a )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a , trust_remote_code=a )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCamelCase__ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=a , use_fast=a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a )
UpperCamelCase__ = AutoTokenizer.from_pretrained(a , trust_remote_code=a , use_fast=a )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def __a ( self ):
class lowercase_ ( a__ ):
__UpperCAmelCase = False
class lowercase_ ( a__ ):
__UpperCAmelCase = NewTokenizer
__UpperCAmelCase = False
try:
AutoConfig.register("custom" , a )
AutoTokenizer.register(a , slow_tokenizer_class=a )
AutoTokenizer.register(a , fast_tokenizer_class=a )
# If remote code is not set, the default is to use local
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCamelCase__ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
UpperCamelCase__ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=a , use_fast=a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCamelCase__ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
UpperCamelCase__ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=a , use_fast=a )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __a ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=a )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
UpperCamelCase__ = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=a , use_fast=a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def __a ( self ):
with self.assertRaisesRegex(
a , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("bert-base" )
def __a ( self ):
with self.assertRaisesRegex(
a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCamelCase__ = AutoTokenizer.from_pretrained(a , revision="aaaaaa" )
def __a ( self ):
# Make sure we have cached the tokenizer.
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 80 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a__ : Optional[List[str]] = None
a__ : Dict = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a__ : Any = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowercase_ :
__UpperCAmelCase = True
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "PIL.Image.Image"
__UpperCAmelCase = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
__UpperCAmelCase = field(default='Image' , init=a__ , repr=a__ )
def __call__( self ):
return self.pa_type
def __a ( self , a ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a , a ):
UpperCamelCase__ = np.array(a )
if isinstance(a , a ):
return {"path": value, "bytes": None}
elif isinstance(a , a ):
return {"path": None, "bytes": value}
elif isinstance(a , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a )
elif isinstance(a , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __a ( self , a , a=None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(a ):
UpperCamelCase__ = PIL.Image.open(a )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a , config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a )
except ValueError:
UpperCamelCase__ = None
with xopen(a , "rb" , use_auth_token=a ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __a ( self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def __a ( self , a ):
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(a , self.pa_type )
def __a ( self , a ):
@no_op_if_value_is_null
def path_to_bytes(a ):
with xopen(a , "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCamelCase__ = pa.array(
[os.path.basename(a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(a , self.pa_type )
def _UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _UpperCamelCase ( __A ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(__A , format=__A )
return buffer.getvalue()
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
if hasattr(__A , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(__A )
UpperCamelCase__ = np.dtype(__A )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(__A ) )
return {"path": None, "bytes": image_to_bytes(__A )}
def _UpperCamelCase ( __A ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(__A )
if isinstance(__A , __A ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__A , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
elif isinstance(__A , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
else:
return objs
else:
return objs
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def _UpperCamelCase ( __A , __A , __A ) -> dict[str, float]:
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(__A , 2 ) - pow(__A , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(__A , 2 ) - pow(__A , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(__A , 2 ) + pow(__A , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
)
def _UpperCamelCase ( ) -> None:
'''simple docstring'''
UpperCamelCase__ = [90, 23, 6, 33, 21, 65, 123, 34423]
UpperCamelCase__ = math.log(len(__A ) , 2 )
print(F'''Optimal value : {minimax(0 , 0 , __A , __A , __A )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 80 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a__ : int = logging.get_logger(__name__)
a__ : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : List[Any] = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
a__ : Optional[Any] = {
'junnyu/roformer_chinese_small': 1_5_3_6,
'junnyu/roformer_chinese_base': 1_5_3_6,
'junnyu/roformer_chinese_char_small': 5_1_2,
'junnyu/roformer_chinese_char_base': 5_1_2,
'junnyu/roformer_small_discriminator': 1_2_8,
'junnyu/roformer_small_generator': 1_2_8,
}
a__ : str = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowercase_ ( a__ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase = RoFormerTokenizer
def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ):
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
UpperCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase" , a ) != do_lower_case
or pre_tok_state.get("strip_accents" , a ) != strip_accents
):
UpperCamelCase__ = getattr(a , pre_tok_state.pop("type" ) )
UpperCamelCase__ = do_lower_case
UpperCamelCase__ = strip_accents
UpperCamelCase__ = pre_tok_class(**a )
UpperCamelCase__ = do_lower_case
def __getstate__( self ):
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = BertPreTokenizer()
return state
def __setstate__( self , a ):
UpperCamelCase__ = d
UpperCamelCase__ = self.__dict__["_tokenizer"].get_vocab()
UpperCamelCase__ = PreTokenizer.custom(JiebaPreTokenizer(a ) )
def __a ( self , a , a=None ):
UpperCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __a ( self , a , a = None ):
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , a , a = None ):
UpperCamelCase__ = self._tokenizer.model.save(a , name=a )
return tuple(a )
def __a ( self , a , a=None , a=None , a=False , **a , ):
UpperCamelCase__ = BertPreTokenizer()
return super().save_pretrained(a , a , a , a , **a )
| 80 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _UpperCamelCase ( __A = 100 ) -> int:
'''simple docstring'''
UpperCamelCase__ = 1
UpperCamelCase__ = 2
for i in range(2 , max_n + 1 ):
UpperCamelCase__ = pre_numerator
UpperCamelCase__ = 2 * i // 3 if i % 3 == 0 else 1
UpperCamelCase__ = cur_numerator
UpperCamelCase__ = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 80 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
a__ : Union[str, Any] = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class lowercase_ ( a__ ):
__UpperCAmelCase = 'mra'
def __init__( self , a=5_02_65 , a=7_68 , a=12 , a=12 , a=30_72 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=1 , a=0.02 , a=1e-5 , a="absolute" , a=4 , a="full" , a=0 , a=0 , a=1 , a=0 , a=2 , **a , ):
super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = block_per_row
UpperCamelCase__ = approx_mode
UpperCamelCase__ = initial_prior_first_n_blocks
UpperCamelCase__ = initial_prior_diagonal_n_blocks
| 80 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> List[str]:
'''simple docstring'''
with open(__A ) as metadata_file:
UpperCamelCase__ = json.load(__A )
UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__A , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
UpperCamelCase__ = torch.load(__A , map_location="cpu" )["module"]
# Load the entity vocab file
UpperCamelCase__ = load_original_entity_vocab(__A )
# add an entry for [MASK2]
UpperCamelCase__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
UpperCamelCase__ = AddedToken("<ent>" , lstrip=__A , rstrip=__A )
UpperCamelCase__ = AddedToken("<ent2>" , lstrip=__A , rstrip=__A )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(__A )
with open(os.path.join(__A , "tokenizer_config.json" ) , "r" ) as f:
UpperCamelCase__ = json.load(__A )
UpperCamelCase__ = "MLukeTokenizer"
with open(os.path.join(__A , "tokenizer_config.json" ) , "w" ) as f:
json.dump(__A , __A )
with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__A , __A )
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
# Initialize the embeddings of the special tokens
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
UpperCamelCase__ = state_dict["embeddings.word_embeddings.weight"]
UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
UpperCamelCase__ = state_dict[bias_name]
UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
UpperCamelCase__ = F'''encoder.layer.{layer_index}.attention.self.'''
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
UpperCamelCase__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
UpperCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
UpperCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
UpperCamelCase__ = state_dict["entity_predictions.bias"]
UpperCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
UpperCamelCase__ = LukeForMaskedLM(config=__A ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
UpperCamelCase__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
UpperCamelCase__ = state_dict[key]
else:
UpperCamelCase__ = state_dict[key]
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__A , strict=__A )
if set(__A ) != {"luke.embeddings.position_ids"}:
raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(__A ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A , task="entity_classification" )
UpperCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
UpperCamelCase__ = (0, 9)
UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
UpperCamelCase__ = model(**__A )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 33, 768) )
UpperCamelCase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
UpperCamelCase__ = torch.Size((1, 1, 768) )
UpperCamelCase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
UpperCamelCase__ = "Tokyo is the capital of <mask>."
UpperCamelCase__ = (24, 30)
UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
UpperCamelCase__ = model(**__A )
UpperCamelCase__ = encoding["input_ids"][0].tolist()
UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__A )
UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
UpperCamelCase__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__A ) )
model.save_pretrained(__A )
def load_original_entity_vocab(__A) -> dict:
    """Parse LUKE's original ``entity_vocab.jsonl`` into a {name: id} mapping.

    Each line of the file is a JSON object with an integer ``"id"`` and a
    list of ``(entity_name, language)`` pairs. Special tokens keep their bare
    name (and stop further processing of that entry); regular entities are
    keyed as ``"<language>:<entity_name>"``.

    :param __A: path to the JSON-lines entity vocab file
    :return: mapping from entity key to entity id
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    # `with` guarantees the handle is closed (the original leaked it).
    with open(__A) as vocab_file:
        data = [json.loads(line) for line in vocab_file]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    # CLI entry point for the MLUKE checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    args = parser.parse_args()
    # NOTE(review): `convert_luke_checkpoint` must be the conversion entry
    # point defined earlier in this module; confirm that definition still
    # carries this name after the renaming pass, otherwise this raises
    # NameError at script time.
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 80 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ : Optional[int] = logging.get_logger(__name__)
a__ : Tuple = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
a__ : List[str] = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
a__ : int = {'facebook/blenderbot_small-90M': 5_1_2}
def get_pairs(__A) -> set:
    """Return the set of adjacent symbol bigrams in a word.

    :param __A: a word as a sequence of symbols (string or tuple of strings)
    :return: set of ``(prev_symbol, next_symbol)`` pairs
    """
    word = __A
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    # (the original re-wrapped `pairs` in set() here — a no-op, removed)
    return pairs
class lowercase_ ( a__ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ['input_ids', 'attention_mask']
def __init__( self , a , a , a="__start__" , a="__end__" , a="__unk__" , a="__null__" , **a , ):
super().__init__(unk_token=a , bos_token=a , eos_token=a , pad_token=a , **a )
with open(a , encoding="utf-8" ) as vocab_handle:
UpperCamelCase__ = json.load(a )
UpperCamelCase__ = {v: k for k, v in self.encoder.items()}
with open(a , encoding="utf-8" ) as merges_handle:
UpperCamelCase__ = merges_handle.read().split("\n" )[1:-1]
UpperCamelCase__ = [tuple(merge.split() ) for merge in merges]
UpperCamelCase__ = dict(zip(a , range(len(a ) ) ) )
UpperCamelCase__ = {}
@property
def __a ( self ):
return len(self.encoder )
def __a ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self , a ):
if token in self.cache:
return self.cache[token]
UpperCamelCase__ = re.sub("([.,!?()])" , r" \1" , a )
UpperCamelCase__ = re.sub("(')" , r" \1 " , a )
UpperCamelCase__ = re.sub(r"\s{2,}" , " " , a )
if "\n" in token:
UpperCamelCase__ = token.replace("\n" , " __newln__" )
UpperCamelCase__ = token.split(" " )
UpperCamelCase__ = []
for token in tokens:
if not len(a ):
continue
UpperCamelCase__ = token.lower()
UpperCamelCase__ = tuple(a )
UpperCamelCase__ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
UpperCamelCase__ = get_pairs(a )
if not pairs:
words.append(a )
continue
while True:
UpperCamelCase__ = min(a , key=lambda a : self.bpe_ranks.get(a , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCamelCase__ , UpperCamelCase__ = bigram
UpperCamelCase__ = []
UpperCamelCase__ = 0
while i < len(a ):
try:
UpperCamelCase__ = word.index(a , a )
new_word.extend(word[i:j] )
UpperCamelCase__ = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCamelCase__ = tuple(a )
UpperCamelCase__ = new_word
if len(a ) == 1:
break
else:
UpperCamelCase__ = get_pairs(a )
UpperCamelCase__ = "@@ ".join(a )
UpperCamelCase__ = word[:-4]
UpperCamelCase__ = word
words.append(a )
return " ".join(a )
def __a ( self , a ):
UpperCamelCase__ = []
UpperCamelCase__ = re.findall(r"\S+\n?" , a )
for token in words:
split_tokens.extend(list(self.bpe(a ).split(" " ) ) )
return split_tokens
def __a ( self , a ):
UpperCamelCase__ = token.lower()
return self.encoder.get(a , self.encoder.get(self.unk_token ) )
def __a ( self , a ):
return self.decoder.get(a , self.unk_token )
def __a ( self , a ):
UpperCamelCase__ = " ".join(a ).replace("@@ " , "" ).strip()
return out_string
def __a ( self , a , a = None ):
if not os.path.isdir(a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase__ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(
a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(a , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + "\n" )
UpperCamelCase__ = 0
with open(a , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
UpperCamelCase__ = token_index
writer.write(" ".join(a ) + "\n" )
index += 1
return vocab_file, merge_file
| 80 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
a__ : str = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class lowercase_ ( a__ ):
    """Configuration class for LiLT (Language-independent Layout Transformer).

    BERT-style text hyper-parameters plus the layout-specific settings
    (``channel_shrink_ratio``, ``max_2d_position_embeddings``). Defaults
    mirror the SCUT-DLVCLab/lilt-roberta-en-base checkpoint.
    """

    # model_type identifier used by the auto classes
    __UpperCAmelCase = 'lilt'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        # The mangled signature repeated the parameter name `a` (SyntaxError);
        # real names are restored, defaults kept positionally.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        # NOTE(review): the mangler turned `max_2d_position_embeddings` into
        # `max_ad_position_embeddings`; the 2D name matches upstream LiLT.
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 80 | 1 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowercase_ ( unittest.TestCase ):
def __a ( self ):
UpperCamelCase__ = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase__ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a )
def __a ( self ):
UpperCamelCase__ = logging.get_verbosity()
UpperCamelCase__ = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCamelCase__ = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a ) as cl:
logger.warning(a )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a ) as cl:
logger.warning(a )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a ) as cl:
logger.warning(a )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(a )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase__ = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCamelCase__ = os.getenv("TRANSFORMERS_VERBOSITY" , a )
UpperCamelCase__ = logging.log_levels[env_level_str]
UpperCamelCase__ = logging.get_verbosity()
self.assertEqual(
a , a , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase__ = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def __a ( self ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ = logging.logging.getLogger()
with CaptureLogger(a ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def __a ( self ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCamelCase__ = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(a ) as cl:
logger.warning_advice(a )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(a ) as cl:
logger.warning_advice(a )
self.assertEqual(cl.out , msg + "\n" )
def _UpperCamelCase ( ) -> None:
    """Smoke-test the huggingface_hub progress-bar toggles.

    Disables progress bars, asserts the disabled state is observable via
    `are_progress_bars_disabled`, then re-enables them and asserts the state
    flips back.
    """
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 80 |
'''simple docstring'''
# Lookup table: DIGITS_SQUARED[n] is the sum of the squared decimal digits
# of n, precomputed for every 5-digit chunk (0 <= n < 100000).
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the decimal digits of `number`.

    Consumes five digits per iteration via the DIGITS_SQUARED table, which
    is faster than digit-by-digit arithmetic.
    """
    sum_of_digits_squared = 0
    while number:
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# Every square-digit chain eventually cycles at 1 or at 89 (Project Euler 92).
# CHAINS[n - 1] caches where n's chain ends: True -> ends at 1, False -> ends
# at 89, None -> not computed yet.
# 58 is seeded (False) because the 89-cycle passes through it, which
# terminates the recursion for every member of that cycle; 1 is the sole
# member of the other chain and is seeded True.
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False


def chain(number: int) -> bool:
    """Return True if `number`'s chain ends at 1, False if it ends at 89.

    Results are memoised in CHAINS. Appending a zero does not change the
    digit-square sum, so every number*10**k below the table size shares the
    result and is filled in eagerly.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore[return-value]
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting values below `number` arrive at 89.

    >>> solution(100)
    80
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Check whether num/den is a non-trivial digit-cancelling fraction.

    The last digit of `num` must equal the first digit of `den`, and naively
    "cancelling" that shared digit must leave the value unchanged,
    e.g. 49/98 -> 4/8.
    """
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """Collect all curious digit-cancelling fractions with two-digit
    denominators and numerators below 10**digit_len.

    >>> fraction_list(2)
    ['16/64', '19/95', '26/65', '49/98']
    """
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            # den % 10 != 0 excludes the trivial 30/50-style cancellations
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(max_digits: int = 2) -> int:
    """Project Euler 33: the product of the four curious fractions reduces
    to 1/d — return that denominator d.
    """
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
| 80 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
) -> None:
    """Precompute and pickle per-example token lengths for train/val splits.

    For every example the number of non-pad source tokens is recorded; with
    ``consider_target=True`` the max of source and target lengths is used
    instead. Results are written via `pickle_save` to each dataset's
    ``len_file``, where the dataset later reads them (e.g. for bucketing).

    :param tokenizer_name: model id or path passed to AutoTokenizer
    :param data_dir: directory containing the train/val text files
    :param consider_target: also account for label lengths when True
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # Iterate with the dataset's own collate_fn so padding matches training.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
# A path is a list of (row, col) coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    """A search node: position, goal, path cost so far and parent link."""

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        # Ordering by heuristic cost lets open_nodes.sort() pick the
        # greedily-best node first.
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    """Greedy best-first search over the module-level `grid`."""

    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> Path | None:
        """Run the search; return the path, or [start] when no path exists."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (lowest heuristic first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Return the in-bounds, non-obstacle neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            # NOTE(review): goal coordinates look transposed here
            # (target.pos_y passed as goal_x) — matches the upstream source,
            # and is harmless for the symmetric default goal; confirm intent.
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links back to the start; return the path start-first."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print('------')
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
        for elem in grid:
            print(elem)
| 80 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a__ : int = logging.get_logger(__name__)
a__ : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : List[Any] = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
a__ : Optional[Any] = {
'junnyu/roformer_chinese_small': 1_5_3_6,
'junnyu/roformer_chinese_base': 1_5_3_6,
'junnyu/roformer_chinese_char_small': 5_1_2,
'junnyu/roformer_chinese_char_base': 5_1_2,
'junnyu/roformer_small_discriminator': 1_2_8,
'junnyu/roformer_small_generator': 1_2_8,
}
a__ : str = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowercase_ ( PreTrainedTokenizerFast ):
    """Fast RoFormer tokenizer backed by the HuggingFace *tokenizers* library.

    Behaves like a fast BERT tokenizer but swaps the pre-tokenizer for a
    Jieba-based one so Chinese words are segmented before WordPiece.
    The mangled version named every method ``__a`` (each shadowing the
    previous one) and repeated parameter names; the real contract names
    expected by PreTrainedTokenizerFast are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-sync the backend normalizer when the passed flags disagree with
        # the serialized tokenizer state.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer is not picklable; swap in the plain
        # BERT pre-tokenizer for serialization.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Serialize with the picklable BERT pre-tokenizer (see __getstate__).
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 80 | 1 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a__ : Dict = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None) -> None:
    """Runtime version check for a single dependency.

    Looks up the pinned requirement string for ``pkg`` in the ``deps`` table
    and delegates to `require_version`, forwarding an optional installation
    ``hint``. (The mangled signature repeated the parameter name, which does
    not compile.)
    """
    require_version(deps[pkg], hint)
| 80 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
a__ : Optional[int] = {'vocab_file': 'vocab.txt'}
a__ : Optional[Any] = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
a__ : Optional[int] = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def _UpperCamelCase ( __A ) -> str:
'''simple docstring'''
with open(__A , "r" ) as f:
UpperCamelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class lowercase_ ( a__ ):
    """Tokenizer for ESM protein language models (whitespace-split vocab lookup).

    NOTE(review): obfuscation collapsed many assignments to the throwaway name
    ``UpperCamelCase__`` (and parameters to ``a``), while later code reads
    attributes such as ``self.all_tokens`` / ``self._id_to_token`` — these were
    presumably ``self.<attr> = ...`` assignments originally; verify against the
    canonical implementation before relying on this class.
    """
    # Standard tokenizer class attributes: vocab file names, pretrained maps,
    # max model input sizes, and the model input names.
    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase = ['input_ids', 'attention_mask']
    def __init__( self , a , a="<unk>" , a="<cls>" , a="<pad>" , a="<mask>" , a="<eos>" , **a , ):
        # Builds token<->id maps from the vocab file and registers special tokens.
        super().__init__(**a )
        UpperCamelCase__ = load_vocab_file(a )
        UpperCamelCase__ = dict(enumerate(self.all_tokens ) )
        UpperCamelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        UpperCamelCase__ = unk_token
        UpperCamelCase__ = cls_token
        UpperCamelCase__ = pad_token
        UpperCamelCase__ = mask_token
        UpperCamelCase__ = eos_token
        UpperCamelCase__ = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def __a ( self , a ):
        # id -> token; unknown ids fall back to the unk token string.
        return self._id_to_token.get(a , self.unk_token )
    def __a ( self , a ):
        # token -> id; unknown tokens fall back to the unk token id.
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )
    def __a ( self , a , **a ):
        # Tokenization is plain whitespace splitting (protein sequences).
        return text.split()
    def __a ( self , a=False ):
        # Vocabulary size (flag for added tokens is ignored here).
        return len(self._id_to_token )
    def __a ( self ):
        # Full token -> index mapping.
        return {token: i for i, token in enumerate(self.all_tokens )}
    def __a ( self , a ):
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )
    def __a ( self , a ):
        return self._id_to_token.get(a , self.unk_token )
    def __a ( self , a , a = None ):
        # Build model inputs: <cls> seq <eos> (pairs require an EOS token).
        UpperCamelCase__ = [self.cls_token_id]
        UpperCamelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
    def __a ( self , a , a = None , a = False ):
        # Special-tokens mask: 1 for special positions, 0 for sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        UpperCamelCase__ = [1] + ([0] * len(a )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(a ) + [1]
        return mask
    def __a ( self , a , a ):
        # Persist the vocabulary as '<prefix->vocab.txt', one token per line.
        UpperCamelCase__ = os.path.join(a , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(a , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def __a ( self ):
        # Vocabulary size exposed as a property.
        return self.get_vocab_size(with_added_tokens=a )
    def __a ( self , a , a = False ):
        # Delegate added-token handling to the base tokenizer.
        return super()._add_tokens(a , special_tokens=a )
| 80 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> List[str]:
    """Convert an original mLUKE checkpoint into HF ``LukeForMaskedLM`` + ``MLukeTokenizer``.

    Loads metadata/config, remaps the state dict (adds <ent>/<ent2> word tokens
    and a [MASK2] entity), verifies hidden states and masked word/entity
    predictions against known-good values, then saves model and tokenizer.

    NOTE(review): all positional parameters were obfuscated to ``__A`` and most
    locals to ``UpperCamelCase__``; subsequent lines read names (``metadata``,
    ``state_dict``, ``tokenizer``, ``entity_vocab``, ``model_size``, ...) that
    presumably came from those assignments — verify against the original
    conversion script before executing.
    """
    with open(__A ) as metadata_file:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__A , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    UpperCamelCase__ = torch.load(__A , map_location="cpu" )["module"]
    # Load the entity vocab file
    UpperCamelCase__ = load_original_entity_vocab(__A )
    # add an entry for [MASK2]
    UpperCamelCase__ = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    UpperCamelCase__ = AddedToken("<ent>" , lstrip=__A , rstrip=__A )
    UpperCamelCase__ = AddedToken("<ent2>" , lstrip=__A , rstrip=__A )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(__A )
    # Rewrite tokenizer_config.json so the saved tokenizer loads as MLukeTokenizer,
    # and dump the (extended) entity vocabulary next to it.
    with open(os.path.join(__A , "tokenizer_config.json" ) , "r" ) as f:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = "MLukeTokenizer"
    with open(os.path.join(__A , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(__A , __A )
    with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(__A , __A )
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    # Initialize the embeddings of the special tokens
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
    UpperCamelCase__ = state_dict["embeddings.word_embeddings.weight"]
    UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
    UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        UpperCamelCase__ = state_dict[bias_name]
        UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
        UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
        UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            UpperCamelCase__ = F'''encoder.layer.{layer_index}.attention.self.'''
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    UpperCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
    UpperCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    UpperCamelCase__ = state_dict["entity_predictions.bias"]
    UpperCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
    UpperCamelCase__ = LukeForMaskedLM(config=__A ).eval()
    # Tied/derived weights are dropped before loading and re-tied afterwards.
    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    UpperCamelCase__ = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            UpperCamelCase__ = state_dict[key]
        else:
            UpperCamelCase__ = state_dict[key]
    UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__A , strict=__A )
    # Only the position-ids buffer may be unexpected, and only the tied heads
    # may be missing — anything else means the conversion went wrong.
    if set(__A ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(__A ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A , task="entity_classification" )
    UpperCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    UpperCamelCase__ = (0, 9)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        UpperCamelCase__ = torch.Size((1, 33, 768) )
        UpperCamelCase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        UpperCamelCase__ = torch.Size((1, 1, 768) )
        UpperCamelCase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    UpperCamelCase__ = "Tokyo is the capital of <mask>."
    UpperCamelCase__ = (24, 30)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    UpperCamelCase__ = encoding["input_ids"][0].tolist()
    UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(__A )
    UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
    UpperCamelCase__ = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(__A ) )
    model.save_pretrained(__A )
def _UpperCamelCase ( __A ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = ["[MASK]", "[PAD]", "[UNK]"]
UpperCamelCase__ = [json.loads(__A ) for line in open(__A )]
UpperCamelCase__ = {}
for entry in data:
UpperCamelCase__ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase__ = entity_id
break
UpperCamelCase__ = F'''{language}:{entity_name}'''
UpperCamelCase__ = entity_id
return new_mapping
if __name__ == "__main__":
    # CLI entry point for the mLUKE conversion script.
    # NOTE(review): the parser is bound to ``a__`` while later lines call
    # ``parser`` / ``args`` / ``convert_luke_checkpoint`` — names presumably
    # lost to obfuscation; verify before executing this script directly.
    a__ : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    a__ : Any = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 80 |
'''simple docstring'''
from math import factorial, pi
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__A ) )
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__A ) )
if __name__ == "__main__":
    # Run doctests, then print a few demo values of the two approximations.
    import doctest
    doctest.testmod()
    # NOTE(review): ``maclaurin_sin`` / ``maclaurin_cos`` are not defined in
    # this module — both helpers above were obfuscated to ``_UpperCamelCase``
    # — so these calls would raise NameError as written; verify the names.
    print(maclaurin_sin(1_0))
    print(maclaurin_sin(-1_0))
    print(maclaurin_sin(1_0, 1_5))
    print(maclaurin_sin(-1_0, 1_5))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(1_0, 1_5))
    print(maclaurin_cos(-1_0, 1_5))
| 80 | 1 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowercase_ ( unittest.TestCase ):
    """Unit tests for ``is_safetensors_compatible``.

    Each test feeds a synthetic repo file listing; compatibility requires that
    every ``.bin`` weight has a matching ``.safetensors`` counterpart
    (optionally with an ``fp16`` variant infix).

    NOTE(review): obfuscation renamed every test method to ``__a``, so at
    runtime only the last definition would survive — the originals presumably
    had distinct descriptive names; verify before running this suite.
    """
    def __a ( self ):
        # Full pipeline: every .bin component has a .safetensors twin -> compatible.
        UpperCamelCase__ = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(a ) )
    def __a ( self ):
        # Unet-only listing with both formats -> compatible.
        UpperCamelCase__ = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(a ) )
    def __a ( self ):
        # Missing the unet safetensors file -> incompatible.
        UpperCamelCase__ = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(a ) )
    def __a ( self ):
        # Text-encoder-only listing with both formats -> compatible.
        UpperCamelCase__ = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(a ) )
    def __a ( self ):
        # Missing the text-encoder safetensors file -> incompatible.
        UpperCamelCase__ = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(a ) )
    def __a ( self ):
        # Full fp16-variant pipeline with matching safetensors -> compatible.
        UpperCamelCase__ = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        UpperCamelCase__ = "fp16"
        self.assertTrue(is_safetensors_compatible(a , variant=a ) )
    def __a ( self ):
        # fp16 unet pair -> compatible.
        UpperCamelCase__ = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        UpperCamelCase__ = "fp16"
        self.assertTrue(is_safetensors_compatible(a , variant=a ) )
    def __a ( self ):
        # pass variant but use the non-variant filenames
        UpperCamelCase__ = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        UpperCamelCase__ = "fp16"
        self.assertTrue(is_safetensors_compatible(a , variant=a ) )
    def __a ( self ):
        # fp16 variant missing the unet safetensors file -> incompatible.
        UpperCamelCase__ = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        UpperCamelCase__ = "fp16"
        self.assertFalse(is_safetensors_compatible(a , variant=a ) )
    def __a ( self ):
        # fp16 text-encoder pair -> compatible.
        UpperCamelCase__ = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        UpperCamelCase__ = "fp16"
        self.assertTrue(is_safetensors_compatible(a , variant=a ) )
    def __a ( self ):
        # pass variant but use the non-variant filenames
        UpperCamelCase__ = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        UpperCamelCase__ = "fp16"
        self.assertTrue(is_safetensors_compatible(a , variant=a ) )
    def __a ( self ):
        # fp16 variant missing the text-encoder safetensors file -> incompatible.
        UpperCamelCase__ = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        UpperCamelCase__ = "fp16"
        self.assertFalse(is_safetensors_compatible(a , variant=a ) )
| 80 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class lowercase_ ( a__ ):
    """Dataset input stream that builds a :class:`Dataset` from a SQL query/table.

    NOTE(review): constructor results are bound to the throwaway name
    ``UpperCamelCase__`` while the read method uses ``self.builder`` /
    ``self.keep_in_memory`` — presumably attribute assignments originally;
    verify against the canonical datasets implementation.
    """
    def __init__( self , a , a , a = None , a = None , a = False , **a , ):
        # Forward cache/feature options to the base stream and create the
        # Sql dataset builder for the given query/connection.
        super().__init__(features=a , cache_dir=a , keep_in_memory=a , **a )
        UpperCamelCase__ = Sql(
            cache_dir=a , features=a , sql=a , con=a , **a , )
    def __a ( self ):
        # Prepare the builder (no download config/mode/verification/base path)
        # and materialize the single 'train' split as a Dataset.
        UpperCamelCase__ = None
        UpperCamelCase__ = None
        UpperCamelCase__ = None
        UpperCamelCase__ = None
        self.builder.download_and_prepare(
            download_config=a , download_mode=a , verification_mode=a , base_path=a , )
        # Build dataset for splits
        UpperCamelCase__ = self.builder.as_dataset(
            split="train" , verification_mode=a , in_memory=self.keep_in_memory )
        return dataset
class lowercase_ :
    """Batched writer that dumps a :class:`Dataset` into a SQL table,
    sequentially or with a multiprocessing pool.

    NOTE(review): constructor results are bound to ``UpperCamelCase__`` while
    methods read ``self.dataset`` / ``self.to_sql_kwargs`` etc. — presumably
    attribute assignments originally; verify.
    """
    def __init__( self , a , a , a , a = None , a = None , **a , ):
        # Validate parallelism before storing the write configuration.
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
        UpperCamelCase__ = dataset
        UpperCamelCase__ = name
        UpperCamelCase__ = con
        UpperCamelCase__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        UpperCamelCase__ = num_proc
        UpperCamelCase__ = to_sql_kwargs
    def __a ( self ):
        # Strip reserved keys from the pass-through kwargs, then delegate the
        # actual write; returns the number of rows written.
        UpperCamelCase__ = self.to_sql_kwargs.pop("sql" , a )
        UpperCamelCase__ = self.to_sql_kwargs.pop("con" , a )
        UpperCamelCase__ = self.to_sql_kwargs.pop("index" , a )
        UpperCamelCase__ = self._write(index=a , **self.to_sql_kwargs )
        return written
    def __a ( self , a ):
        # Write one slice [offset, offset + batch_size) via pandas
        # DataFrame.to_sql; batches after the first append to the table.
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = args
        UpperCamelCase__ = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        UpperCamelCase__ = query_table(
            table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
        UpperCamelCase__ = batch.to_pandas()
        UpperCamelCase__ = df.to_sql(self.name , self.con , index=a , **a )
        return num_rows or len(a )
    def __a ( self , a , **a ):
        # Sequential path when num_proc is None/1; otherwise fan batches out
        # to a multiprocessing pool, summing rows written either way.
        UpperCamelCase__ = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            UpperCamelCase__ , UpperCamelCase__ = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                    written += num_rows
        return written
| 80 | 1 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def _UpperCamelCase ( __A ) -> Union[str, Any]:
    """Argparse factory for the `env` subcommand; the parsed-args argument is
    intentionally ignored.

    NOTE(review): ``EnvironmentCommand`` is not defined in this module (the
    command class below is named ``lowercase_``) — presumably lost to
    renaming; verify before use.
    """
    return EnvironmentCommand()
class lowercase_ ( a__ ):
    """Implements ``diffusers-cli env``: collects and prints version/platform
    diagnostics for bug reports.

    NOTE(review): locals were obfuscated to ``UpperCamelCase__``; the info dict
    reads names (``pt_version``, ``hub_version``, ``info``, ``d``, ...) that
    presumably came from those assignments — verify against the original.
    """
    @staticmethod
    def __a ( a ):
        # Register the `env` subcommand and point its handler at this class.
        UpperCamelCase__ = parser.add_parser("env" )
        download_parser.set_defaults(func=a )
    def __a ( self ):
        # Probe each optional dependency, defaulting to 'not installed'.
        UpperCamelCase__ = huggingface_hub.__version__
        UpperCamelCase__ = "not installed"
        UpperCamelCase__ = "NA"
        if is_torch_available():
            import torch
            UpperCamelCase__ = torch.__version__
            UpperCamelCase__ = torch.cuda.is_available()
        UpperCamelCase__ = "not installed"
        if is_transformers_available():
            import transformers
            UpperCamelCase__ = transformers.__version__
        UpperCamelCase__ = "not installed"
        if is_accelerate_available():
            import accelerate
            UpperCamelCase__ = accelerate.__version__
        UpperCamelCase__ = "not installed"
        if is_xformers_available():
            import xformers
            UpperCamelCase__ = xformers.__version__
        UpperCamelCase__ = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }
        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(a ) )
        return info
    @staticmethod
    def __a ( a ):
        # Render the info dict as a '- key: value' bullet list.
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 80 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Names of the TorchDynamo / torch.compile backends offered in the
# interactive config menu (indexed by the user's menu choice).
a__ : Any = [
    'EAGER',
    'AOT_EAGER',
    'INDUCTOR',
    'NVFUSER',
    'AOT_NVFUSER',
    'AOT_CUDAGRAPHS',
    'OFI',
    'FX2TRT',
    'ONNXRT',
    'IPEX',
]
def _UpperCamelCase ( __A , __A=None , __A=None , __A=None ) -> int:
'''simple docstring'''
UpperCamelCase__ = True
while ask_again:
UpperCamelCase__ = input(__A )
try:
if default is not None and len(__A ) == 0:
return default
return convert_value(__A ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__A )
def _UpperCamelCase ( input_text , options=None , convert_value=None , default=0 ) -> Any:
    """Show a BulletMenu of options and return the (optionally converted) choice.

    Bug fixes vs. the obfuscated original: every parameter shared the name
    ``__A`` (a SyntaxError), the menu/result were bound to throwaway names,
    and the mutable ``[]`` default is replaced by the None-sentinel idiom.

    Args:
        input_text: Prompt/title for the menu.
        options: Menu entries; defaults to an empty list.
        convert_value: Optional callable applied to the selected index.
        default: Index pre-selected in the menu.
    """
    menu = BulletMenu(input_text , options or [] )
    result = menu.run(default_choice=default )
    return convert_value(result ) if convert_value is not None else result
def _UpperCamelCase ( __A ) -> Dict:
    """Convert a menu index into the corresponding ComputeEnvironment member.

    Bug fix: the converted index is bound to ``value`` (it was previously
    assigned to a throwaway name, leaving ``value`` undefined).
    """
    value = int(__A )
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _UpperCamelCase ( __A ) -> List[Any]:
    """Convert a menu index into the corresponding DistributedType member.

    Bug fix: the converted index is bound to ``value`` (it was previously
    assigned to a throwaway name, leaving ``value`` undefined).
    """
    value = int(__A )
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _UpperCamelCase ( __A ) -> Dict:
    """Convert a menu index into the corresponding DynamoBackend value.

    Bug fix: the converted index is bound to ``value`` (it was previously
    assigned to a throwaway name, leaving ``value`` undefined).
    NOTE(review): ``DYNAMO_BACKENDS`` is bound to ``a__`` at module level in
    this obfuscated file — verify the list name before running.
    """
    value = int(__A )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _UpperCamelCase ( __A ) -> str:
    """Convert a menu index into the corresponding PrecisionType member.

    Bug fix: the converted index is bound to ``value`` (it was previously
    assigned to a throwaway name, leaving ``value`` undefined).
    """
    value = int(__A )
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _UpperCamelCase ( __A ) -> Any:
    """Convert a menu index into the corresponding SageMakerDistributedType member.

    Bug fix: the converted index is bound to ``value`` (it was previously
    assigned to a throwaway name, leaving ``value`` undefined).
    """
    value = int(__A )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _UpperCamelCase ( __A ) -> Dict:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class lowercase_ ( argparse.RawDescriptionHelpFormatter ):
def __a ( self , a , a , a , a ):
UpperCamelCase__ = super()._format_usage(a , a , a , a )
UpperCamelCase__ = usage.replace("<command> [<args>] " , "" )
return usage
| 80 | 1 |
'''Lazy import structure for the BERTweet model (tokenizer only).'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Submodule -> public names, consumed by _LazyModule below.
# Bug fix: the original bound this dict only to ``a__``, leaving the
# ``_import_structure`` name read by the _LazyModule call undefined.
_import_structure = {'tokenization_bertweet': ['BertweetTokenizer']}
a__ = _import_structure
if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys
    a__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
if resistor <= 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__A )
first_sum += 1 / float(__A )
index += 1
return 1 / first_sum
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative value!'''
raise ValueError(__A )
index += 1
return sum_r
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 80 | 1 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
a__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class lowercase_ ( a__ ):
    """Monocular depth-estimation pipeline: image(s) in, dict with the raw
    ``predicted_depth`` tensor and a rendered grayscale ``depth`` PIL image out.

    NOTE(review): locals were obfuscated to ``UpperCamelCase__``; names read
    below (``image``, ``model_inputs``, ``self.image_size``, ...) presumably
    came from those assignments — verify against the canonical pipeline.
    """
    def __init__( self , *a , **a ):
        # Require the vision backend and restrict to depth-estimation models.
        super().__init__(*a , **a )
        requires_backends(self , "vision" )
        self.check_model_type(a )
    def __call__( self , a , **a ):
        # Delegate to Pipeline.__call__ (preprocess -> _forward -> postprocess).
        return super().__call__(a , **a )
    def __a ( self , **a ):
        # No stage-specific parameters.
        return {}, {}, {}
    def __a ( self , a ):
        # Preprocess: load the image and run it through the image processor.
        UpperCamelCase__ = load_image(a )
        UpperCamelCase__ = image.size
        UpperCamelCase__ = self.image_processor(images=a , return_tensors=self.framework )
        return model_inputs
    def __a ( self , a ):
        # Forward pass through the depth-estimation model.
        UpperCamelCase__ = self.model(**a )
        return model_outputs
    def __a ( self , a ):
        # Postprocess: bicubic-upsample the predicted depth back to the input
        # size and render it as an 8-bit grayscale PIL image.
        UpperCamelCase__ = model_outputs.predicted_depth
        UpperCamelCase__ = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=a )
        UpperCamelCase__ = prediction.squeeze().cpu().numpy()
        UpperCamelCase__ = (output * 2_55 / np.max(a )).astype("uint8" )
        UpperCamelCase__ = Image.fromarray(a )
        UpperCamelCase__ = {}
        UpperCamelCase__ = predicted_depth
        UpperCamelCase__ = depth
        return output_dict
| 80 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowercase_ ( enum.Enum ):
    """Return-type selector for the text-generation pipeline.

    NOTE(review): obfuscation collapsed all three member names to the private
    name ``__UpperCAmelCase``; name-mangled private attributes are not
    converted into Enum members, so the original presumably had three public
    members (TENSORS / NEW_TEXT / FULL_TEXT, per references below) — verify.
    """
    __UpperCAmelCase = 0
    __UpperCAmelCase = 1
    __UpperCAmelCase = 2
@add_end_docstrings(a__ )
class lowercase_ ( a__ ):
    """Text-generation pipeline for causal (decoder-only) language models.

    NOTE(review): obfuscation collapsed locals/attributes to
    ``UpperCamelCase__`` and several method names to ``__a``; names read below
    (``prefix``, ``preprocess_params``, ``generate_kwargs``, ``inputs``, ...)
    presumably came from those assignments — verify against the canonical
    transformers pipeline before relying on this class.
    """
    # Long article prepended for XLNet / Transfo-XL prompts to give the model
    # extra context ("XL prefix").
    __UpperCAmelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__( self , *a , **a ):
        # Restrict to causal-LM model classes for the active framework, and
        # install the default prompt prefix (config- or XL-derived) if unset.
        super().__init__(*a , **a )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            UpperCamelCase__ = None
            if self.model.config.prefix is not None:
                UpperCamelCase__ = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                UpperCamelCase__ = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._sanitize_parameters(prefix=a , **self._forward_params )
                UpperCamelCase__ = {**self._preprocess_params, **preprocess_params}
                UpperCamelCase__ = {**self._forward_params, **forward_params}
    def __a ( self , a=None , a=None , a=None , a=None , a=None , a=None , a=None , a=None , **a , ):
        # Split user kwargs into (preprocess, forward/generate, postprocess)
        # parameter dicts, validating mutually exclusive return options.
        UpperCamelCase__ = {}
        if prefix is not None:
            UpperCamelCase__ = prefix
        if prefix:
            # Tokenize the prefix once so generation lengths can be adjusted.
            UpperCamelCase__ = self.tokenizer(
                a , padding=a , add_special_tokens=a , return_tensors=self.framework )
            UpperCamelCase__ = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    " [None, 'hole']" )
            UpperCamelCase__ = handle_long_generation
        preprocess_params.update(a )
        UpperCamelCase__ = generate_kwargs
        UpperCamelCase__ = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            UpperCamelCase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            UpperCamelCase__ = ReturnType.TENSORS
        if return_type is not None:
            UpperCamelCase__ = return_type
        if clean_up_tokenization_spaces is not None:
            UpperCamelCase__ = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Only single-token stop sequences are supported; warn otherwise.
            UpperCamelCase__ = self.tokenizer.encode(a , add_special_tokens=a )
            if len(a ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            UpperCamelCase__ = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def __a ( self , *a , **a ):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True} )
        return super()._parse_and_tokenize(*a , **a )
    def __call__( self , a , **a ):
        # Delegate to Pipeline.__call__ (preprocess -> _forward -> postprocess).
        return super().__call__(a , **a )
    def __a ( self , a , a="" , a=None , **a ):
        # Preprocess: tokenize prefix+prompt; in 'hole' mode, truncate from the
        # left so prompt + requested new tokens fit the model's max length.
        UpperCamelCase__ = self.tokenizer(
            prefix + prompt_text , padding=a , add_special_tokens=a , return_tensors=self.framework )
        UpperCamelCase__ = prompt_text
        if handle_long_generation == "hole":
            UpperCamelCase__ = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                UpperCamelCase__ = generate_kwargs["max_new_tokens"]
            else:
                UpperCamelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected" )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                UpperCamelCase__ = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length" )
                UpperCamelCase__ = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    UpperCamelCase__ = inputs["attention_mask"][:, -keep_length:]
        return inputs
    def __a ( self , a , **a ):
        # Forward: run generate(), supporting empty prompts and adjusting
        # max/min length for a prefix without mutating the caller's kwargs.
        UpperCamelCase__ = model_inputs["input_ids"]
        UpperCamelCase__ = model_inputs.get("attention_mask" , a )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            UpperCamelCase__ = None
            UpperCamelCase__ = None
            UpperCamelCase__ = 1
        else:
            UpperCamelCase__ = input_ids.shape[0]
        UpperCamelCase__ = model_inputs.pop("prompt_text" )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        UpperCamelCase__ = generate_kwargs.pop("prefix_length" , 0 )
        if prefix_length > 0:
            UpperCamelCase__ = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                UpperCamelCase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            UpperCamelCase__ = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        UpperCamelCase__ = self.model.generate(input_ids=a , attention_mask=a , **a )
        UpperCamelCase__ = generated_sequence.shape[0]
        if self.framework == "pt":
            UpperCamelCase__ = generated_sequence.reshape(a , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            UpperCamelCase__ = tf.reshape(a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def __a ( self , a , a=ReturnType.FULL_TEXT , a=True ):
        # Postprocess: decode each generated sequence, strip the (decoded)
        # prompt, and build the records list per the requested return type.
        UpperCamelCase__ = model_outputs["generated_sequence"][0]
        UpperCamelCase__ = model_outputs["input_ids"]
        UpperCamelCase__ = model_outputs["prompt_text"]
        UpperCamelCase__ = generated_sequence.numpy().tolist()
        UpperCamelCase__ = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                UpperCamelCase__ = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                UpperCamelCase__ = self.tokenizer.decode(
                    a , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    UpperCamelCase__ = 0
                else:
                    UpperCamelCase__ = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , ) )
                if return_type == ReturnType.FULL_TEXT:
                    UpperCamelCase__ = prompt_text + text[prompt_length:]
                else:
                    UpperCamelCase__ = text[prompt_length:]
                UpperCamelCase__ = {"generated_text": all_text}
            records.append(a )
        return records
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase_(unittest.TestCase):
    """Slow integration test: TF XLM-RoBERTa base produces known activations."""

    @slow
    def test_output_embeds_base_model(self):
        # NOTE(review): method renamed from obfuscated `__a` so unittest
        # actually discovers it; `tf.intaa`/`tf.floataa` were invalid dtype
        # names and are restored to tf.int32 / tf.float32.
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")
        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }
        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 80 |
'''simple docstring'''
from ....utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
class lowercase_:
    """Config wrapper that adopts a text model config and adds a modal hidden size.

    NOTE(review): the original subclassed `a__`, which the line above binds to
    a *logger instance* — subclassing a non-class raises TypeError, so the base
    was dropped (the upstream MMBT-style config has no base class).
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Share the wrapped config's attribute dict, then record the
        # modality-specific hidden size and (optionally) the label count.
        # Note: because __dict__ is shared, these writes also appear on
        # the wrapped `config` object (upstream behavior).
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 80 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for DiTPipeline built from tiny dummy components."""

    # NOTE(review): the original bound every class attribute to the same
    # obfuscated name `__UpperCAmelCase` (later bindings clobber earlier ones)
    # and used the undefined base `a__`; names restored to what
    # PipelineTesterMixin expects, base restored to the imported mixin.
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # TODO(review): attribute name lost in obfuscation — confirm against upstream test_dit.py

    def get_dummy_components(self):
        """Build tiny transformer/vae/scheduler components for fast tests."""
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,  # restored from upstream; original passed undefined `a`
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,  # restored from upstream; original passed undefined `a`
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline call kwargs for `device`."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class lowercase_(unittest.TestCase):
    """Slow GPU integration tests for the pretrained DiT pipelines."""

    def tearDown(self):
        # NOTE(review): renamed from obfuscated `__a` — it calls
        # super().tearDown(), so it must actually override tearDown to run.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy'''
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        # Swap in the multistep solver (upstream assigns it onto the pipe).
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f'''/dit/{word}_512.npy'''
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 80 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
a__ : Tuple = {'UserAgent': UserAgent().random}
def _UpperCamelCase(script) -> dict:
    """Extract the profile "user" dict from an Instagram page <script> tag.

    Args:
        script: a BeautifulSoup <script> element whose first content item is
            the shared-data JS assignment (ends with ";").

    Returns:
        The ``entry_data.ProfilePage[0].graphql.user`` dict.
    """
    # NOTE(review): original parameter was `__A` while the body read `script`
    # — restored from the body.
    data = script.contents[0]
    # Slice from the opening of the JSON object to just before the trailing ";".
    info = json.loads(data[data.find("{\"config\"") : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


# Public alias: the InstagramUser class below calls this name.
extract_user_profile = _UpperCamelCase
class lowercase_:
    """Scrape public profile information for an Instagram username.

    NOTE(review): in the obfuscated original every property was named `__a`,
    so each definition clobbered the previous one; property names are
    restored from how the smoke test below consumes the class.
    """

    def __init__(self, username):
        self.url = f'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()

    def get_json(self):
        """Fetch the profile page and extract the user dict from its scripts."""
        # `a__` is the module-level request-headers dict (obfuscated name).
        html = requests.get(self.url, headers=a__).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self):
        return f'''{self.__class__.__name__}(\'{self.username}\')'''

    def __str__(self):
        return f'''{self.fullname} ({self.username}) is {self.biography}'''

    @property
    def username(self):
        return self.user_data["username"]

    @property
    def fullname(self):
        return self.user_data["full_name"]

    @property
    def biography(self):
        return self.user_data["biography"]

    @property
    def email(self):
        return self.user_data["business_email"]

    @property
    def website(self):
        return self.user_data["external_url"]

    @property
    def number_of_followers(self):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self):
        return self.user_data["is_verified"]

    @property
    def is_private(self):
        return self.user_data["is_private"]
def _UpperCamelCase(username="github") -> None:
    """Smoke-test InstagramUser against the live site (skipped on CI).

    NOTE(review): parameter name restored from the body (original used `__A`
    while reading `username`); `instagram_user` was never bound.
    """
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): original bound the instance to `a__` but printed
    # `instagram_user` — binding restored to the name that is used.
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(F"""{instagram_user.number_of_posts = }""")
    print(F"""{instagram_user.number_of_followers = }""")
    print(F"""{instagram_user.number_of_followings = }""")
    print(F"""{instagram_user.email = }""")
    print(F"""{instagram_user.website = }""")
    print(F"""{instagram_user.profile_picture_url = }""")
    print(F"""{instagram_user.is_verified = }""")
    print(F"""{instagram_user.is_private = }""")
| 80 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a__ : Any = False
class lowercase_ ( unittest.TestCase ):
    # Placeholder for fast (CPU) pipeline tests; intentionally empty —
    # only the slow GPU integration tests below are implemented.
    pass
@slow
@require_torch_gpu
class lowercase_(unittest.TestCase):
    """Slow GPU integration test for VersatileDiffusion image variation."""

    def test_inference_image_variations(self):
        # NOTE(review): locals restored from the names the original body read;
        # the `.to()` target was an undefined `a`, restored to torch_device.
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 80 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase(voltage, current, resistance) -> dict[str, float]:
    """Apply Ohm's law: exactly one of the three values must be 0; solve for
    it and return it as a single-entry dict keyed by its name.

    Raises:
        ValueError: if not exactly one argument is 0, or if resistance < 0.
    """
    # NOTE(review): original declared three parameters all named `__A`
    # (a SyntaxError) while the body read voltage/current/resistance;
    # parameter names restored from the body.
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")


# Importable public alias for the solver.
ohms_law = _UpperCamelCase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 80 | 1 |
'''simple docstring'''
def is_palindrome(num) -> bool:
    """Return True if `num` reads the same forwards and backwards."""
    return str(num) == str(num)[::-1]


def sum_reverse(num) -> int:
    """Return `num` plus its digit-reversed counterpart."""
    return int(num) + int(str(num)[::-1])


def solution(limit=10000) -> int:
    """Count Lychrel candidates below `limit` (Project Euler problem 55).

    A number is assumed Lychrel if 50 reverse-and-add iterations never
    produce a palindrome.
    """
    # NOTE(review): the original named all three functions `_UpperCamelCase`
    # (each clobbering the last) while the body called is_palindrome /
    # sum_reverse — names restored from the call sites.
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            # while-else: no palindrome reached within 50 iterations
            lychrel_nums.append(num)
    return len(lychrel_nums)


# Backward-compatible alias for the original obfuscated public name.
_UpperCamelCase = solution

if __name__ == "__main__":
    print(F"""{solution() = }""")
| 80 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: submodule name -> public names it provides.
# NOTE(review): the original bound every piece of this structure to the
# throwaway name `a__` and then passed an undefined `_import_structure` to
# _LazyModule; the canonical transformers layout is restored below.
_import_structure = {
    "configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )
else:
    import sys

    # Install the lazily-populated module object in place of this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 | 1 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
# Weight-name suffixes that are replicated across TP ranks and must be
# averaged when merging shards.
# NOTE(review): the converter below reads these two lists by name; the
# original bound both to the throwaway name `a__`.
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]
# Weight names whose TP shards are concatenated along dim 1 (row-parallel
# layers); everything else concatenates along dim 0.
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Map a Megatron-DeepSpeed weight name to its transformers name.

    Args:
        key: weight name inside the checkpoint shard.
        file: shard file name (encodes the transformer layer index).

    Returns:
        The transformers-side weight name.
    """
    # NOTE(review): original had two parameters both named `__A` (SyntaxError);
    # names/usage restored from the upstream BLOOM conversion script, which
    # matches the layer index out of the *file* name.
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: the first 3 "layers" are embeddings/norms,
    # hence the -3 offset.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f'''h.{layer_number}.''' + key
def get_dtype_size(dtype):
    """Return the size in bytes of one element of torch `dtype`.

    Raises:
        ValueError: if the dtype name does not end in a bit width.
    """
    # NOTE(review): renamed from `_UpperCamelCase` to the name the converter
    # below actually calls; locals restored from the body's reads.
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''')
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    """Merge Megatron-DeepSpeed BLOOM TP shards into a transformers checkpoint.

    Args:
        bloom_checkpoint_path: directory holding the Megatron shard files.
        bloom_config_file: optional BloomConfig json; "" means defaults.
        pytorch_dump_folder_path: output directory for weights + config.
        shard_model: if True, write one output shard per input layer file
            plus an index json instead of a single state dict.
        pretraining_tp: tensor-parallel degree used at training time.

    NOTE(review): the original declared five parameters all named `__A`
    (a SyntaxError) and bound every local to a throwaway name; names are
    restored from the body's reads and the upstream conversion script.
    """
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f'''model_0{i}''')
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )
            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )
        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))
        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f'''model_0{i}''')
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")
                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)
                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights accross TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f'''The keys {missing_keys} are missing'''
        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''')
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f'''Save configuration file to {pytorch_config_dump_path}''')
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    # NOTE(review): `parser`/`args` were never bound in the original (assigned
    # to `a__`) and the converter was called under an undefined name.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--bloom_checkpoint_path',
        default=None,
        type=str,
        required=True,
        help='Path to the Megatron-LM checkpoint path.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--bloom_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--shard_model',
        action='store_true',
        help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
    )
    parser.add_argument(
        '--pretraining_tp',
        default=4,
        type=int,
        help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a, b) -> list:
    """Multiply two 2x2 matrices directly (Strassen recursion base case)."""
    # NOTE(review): the original named every function `_UpperCamelCase` while
    # the call sites used the real names (strassen, actual_strassen, ...);
    # names and parameter bindings are restored from those call sites.
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a, matrix_b):
    """Element-wise sum of two equally-shaped matrices."""
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a, matrix_b):
    """Element-wise difference of two equally-shaped matrices."""
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix) -> tuple[int, int]:
    """Return (rows, cols) of `matrix`."""
    return len(matrix), len(matrix[0])


def print_matrix(matrix) -> None:
    """Print a matrix one row per line."""
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a, matrix_b) -> list:
    """Recursive Strassen multiplication of power-of-two square matrices."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)
    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)
    # The seven Strassen products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1, matrix2) -> list:
    """Multiply two matrices via Strassen, zero-padding to a power of two.

    Raises:
        Exception: if the inner dimensions do not match.
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f'''Matrix A: {matrix1}\n'''
            f'''Matrix B: {matrix2}'''
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)
    # NOTE(review): the obfuscation collapsed which dimensions are compared
    # here; both-square is the most plausible reading, but returning the
    # inputs instead of multiplying looks suspicious — confirm upstream.
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)
    final_matrix = actual_strassen(new_matrix1, new_matrix2)
    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 80 | 1 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _UpperCamelCase() -> None:
    """patch_submodule replaces every alias of os.path.join and restores them.

    NOTE(review): the original bound the mock string to a throwaway name while
    the asserts read `mock`; the binding is restored.
    """
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def _UpperCamelCase() -> None:
    """Builtins reachable from the target module's globals can be patched."""
    assert _test_patching.open is open
    # NOTE(review): `mock` was never bound in the original — restored.
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open
def _UpperCamelCase() -> None:
    """Patching an attribute of a module the target never imported is a no-op."""
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def _UpperCamelCase() -> None:
    """A builtin absent from the module's globals is restored after patching."""
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def _UpperCamelCase() -> None:
    """patch_submodule also works via explicit start()/stop() calls."""
    # NOTE(review): `mock` and `patch` were never bound in the original.
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def _UpperCamelCase() -> None:
    """Nested patches of sibling attributes compose in either nesting order."""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    # NOTE(review): the three mock bindings were lost in the original;
    # restored from the order of the string literals and the with-clauses.
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    """Patching a module or attribute that doesn't exist must be a silent no-op."""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 80 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase_(BaseImageProcessor):
    r"""
    CLIP image processor: resize -> center crop -> rescale -> normalize, returning a
    `BatchFeature` with key `"pixel_values"`.

    Args:
        do_resize (`bool`, defaults to `True`): whether to resize to `size`.
        size (`Dict[str, int]`, defaults to `{"shortest_edge": 224}`): shortest-edge
            target size, aspect ratio preserved.
        resample (`PILImageResampling`, defaults to `BICUBIC`): resize filter.
        do_center_crop (`bool`, defaults to `True`): whether to center-crop to `crop_size`.
        crop_size (`Dict[str, int]`, defaults to `{"height": 224, "width": 224}`).
        do_rescale (`bool`, defaults to `True`): whether to scale pixels by `rescale_factor`.
        rescale_factor (`int` or `float`, defaults to `1/255`).
        do_normalize (`bool`, defaults to `True`): whether to normalize with
            `image_mean` / `image_std` (OpenAI CLIP statistics by default).
        do_convert_rgb (`bool`, defaults to `True`): whether to convert inputs to RGB first.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize as `(image - mean) / std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one image or a batch; every flag falls back to the instance default."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 80 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase_(unittest.TestCase):
    """Tests for the framework-agnostic helpers (flatten_dict, transpose, reshape,
    squeeze, expand_dims): each op is checked against the NumPy reference for numpy,
    torch, tf and jax inputs."""

    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 80 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for CLIP (slow + fast implementations)."""

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Minimal BPE vocabulary/merges sufficient to tokenize "lower newer".
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        # The slow (ftfy-based) and fast tokenizers must agree on tricky unicode input.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark):w
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = f''' {text}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 80 | 1 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowercase_(unittest.TestCase):
    """CPU smoke tests: run the accelerate test scripts through the debug launcher."""

    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 80 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Regex that strips English articles during answer normalization.
ARTICLES_REGEX = re.compile(R'\b(a|an|the)\b', re.UNICODE)
# Parsed command-line options; set in the __main__ guard.
OPTS = None
def parse_args():
    """Parse the command-line arguments for the SQuAD 2.0 evaluation script."""
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    # Print usage and bail when invoked with no arguments at all.
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to True if it has at least one gold answer."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Return whitespace tokens of the normalized answer ([] for empty/None)."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """Exact-match score (0/1) between normalized gold and predicted answers."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 between gold and predicted answers (SQuAD definition)."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute per-question exact-match and F1 scores (max over gold answers)."""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(F'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Replace scores by the no-answer decision when the model's no-answer
    probability exceeds the threshold."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # Predicting "" is correct exactly when the question has no answer.
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into percentage metrics, optionally over a
    subset of question ids."""
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from `new_eval` into `main_eval` under `prefix_key`."""
    for k in new_eval:
        main_eval[f'''{prefix}_{k}'''] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Save a step-style precision-recall curve to `out_image` (uses the module's
    global `plt`, imported in the __main__ guard)."""
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer probability threshold and compute average precision;
    optionally plot the PR curve."""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves (exact, F1 and oracle) and merge their average precision
    into `main_eval`."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_exact.png"), title="Precision-Recall curve for Exact Match score", )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_f1.png"), title="Precision-Recall curve for F1 score", )
    # Oracle: score 1 for answerable questions, 0 otherwise.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, "pr_oracle.png"), title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question ids."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))  # normalize to proportions
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(F'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(image_dir, F'''na_prob_hist_{name}.png''' ) )
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold that maximizes the aggregate score.

    Returns (best score as a percentage, best threshold)."""
    # Start from "predict no-answer for everything": score = #unanswerable.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1  # answered an unanswerable question: lose a point
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best achievable exact/F1 scores (and their thresholds) in `main_eval`."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """Load data/predictions, compute SQuAD 2.0 metrics and write/print them."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # Import matplotlib lazily and headless, only when plots are requested.
        import matplotlib

        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
    main()
| 80 | 1 |
'''simple docstring'''
import argparse
import os
import re
a__ : Any = 'src/diffusers'
# Pattern that looks at the indentation in a line.
a__ : Any = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
a__ : List[Any] = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a__ : Dict = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
a__ : Union[str, Any] = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a__ : Dict = re.compile(R'\[([^\]]+)\]')
def get_indent(line):
    """Return the leading whitespace of `line` ("" for blank/unmatched lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks delimited at the given indentation level,
    optionally only between `start_prompt` and `end_prompt` lines."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        # Everything before the start prompt forms the first block.
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """Wrap a key function so sorting ignores case and underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort `objects` grouped as constants, then classes, then functions,
    each group alphabetized case-insensitively ignoring underscores."""

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Sort the object names inside a single `_import_structure` statement."""

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F'''[{imports}]'''
        keys = [part.strip().replace("\"", "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace("\"", "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([F'''"{k}"''' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def _UpperCamelCase(file, check_only=True):
    """Sort the `_import_structure` blocks of one `__init__.py`.

    Returns True when `check_only` is set and the file would change; otherwise
    rewrites the file in place. Relies on `split_code_in_indented_blocks`,
    `get_indent`, `_re_direct_key`, `_re_indirect_key` and
    `sort_objects_in_import` defined earlier in this file.

    NOTE: the obfuscated signature used the duplicate parameter name `__A`
    twice (a SyntaxError) and every local read/write pair was split between
    `UpperCamelCase__` and the original name; the original names are restored.
    """
    with open(file, "r") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def _UpperCamelCase ( __A=True ) -> Optional[Any]:
    """Walk a directory tree, sort imports of every ``__init__.py`` found, and
    raise if any file would change.

    NOTE(review): the single parameter is used both as the tree root passed to
    ``os.walk`` and as the ``check_only`` flag — presumably the original walked
    a path constant (e.g. the package root) and the parameter was only
    ``check_only``; confirm against the upstream script before running.
    """
    UpperCamelCase__ = []
    for root, _, files in os.walk(__A ):
        if "__init__.py" in files:
            # NOTE(review): the results are assigned to `UpperCamelCase__` but read
            # back as `result`/`failures` below — NameError as written.
            UpperCamelCase__ = sort_imports(os.path.join(__A , "__init__.py" ) , check_only=__A )
            if result:
                UpperCamelCase__ = [os.path.join(__A , "__init__.py" )]
    if len(__A ) > 0:
        raise ValueError(F'''Would overwrite {len(__A )} files, run `make style`.''' )
if __name__ == "__main__":
    # CLI entry point: `--check_only` reports unsorted inits instead of rewriting.
    # Fix: the parser/namespace were assigned to the throwaway name `a__` while
    # being read back as `parser`/`args`, which raised NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 80 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a__ : Optional[List[str]] = None
a__ : Dict = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a__ : Any = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowercase_ :
    """Image feature: stores images in Arrow as ``{"bytes": binary, "path": string}``
    structs and decodes them back to ``PIL.Image.Image`` objects.

    NOTE(review): throughout this class the local assignment targets were
    mangled to ``UpperCamelCase__`` while the reads keep the original names
    (``bytes_``, ``path``, ``image`` ...), so several methods raise NameError
    as written. Restore the original local names before use.
    """

    # NOTE(review): all five fields share the name `__UpperCAmelCase`, so only
    # the last assignment survives; presumably these were `decode`, `id`,
    # `dtype`, `pa_type` and `_type` originally — confirm against upstream.
    __UpperCAmelCase = True
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "PIL.Image.Image"
    __UpperCAmelCase = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    __UpperCAmelCase = field(default='Image' , init=a__ , repr=a__ )

    def __call__( self ):
        # The Arrow storage type of this feature.
        return self.pa_type

    def __a ( self , a ):
        """Encode one example (str path, raw bytes, np.ndarray or PIL image)
        into the ``{"bytes", "path"}`` storage dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        if isinstance(a , a ):
            UpperCamelCase__ = np.array(a )
        if isinstance(a , a ):
            return {"path": value, "bytes": None}
        elif isinstance(a , a ):
            return {"path": None, "bytes": value}
        elif isinstance(a , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(a )
        elif isinstance(a , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(a )
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )

    def __a ( self , a , a=None ):
        """Decode a ``{"bytes", "path"}`` dict into a ``PIL.Image.Image``,
        reading from local disk, the Hub (via ``xopen``) or in-memory bytes."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'." )
        if token_per_repo_id is None:
            UpperCamelCase__ = {}
        UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(a ):
                    UpperCamelCase__ = PIL.Image.open(a )
                else:
                    UpperCamelCase__ = path.split("::" )[-1]
                    try:
                        # Resolve the per-repo auth token when streaming from the Hub.
                        UpperCamelCase__ = string_to_dict(a , config.HUB_DATASETS_URL )["repo_id"]
                        UpperCamelCase__ = token_per_repo_id.get(a )
                    except ValueError:
                        UpperCamelCase__ = None
                    with xopen(a , "rb" , use_auth_token=a ) as f:
                        UpperCamelCase__ = BytesIO(f.read() )
                    UpperCamelCase__ = PIL.Image.open(bytes_ )
        else:
            UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
        image.load() # to avoid "Too many open files" errors
        return image

    def __a ( self ):
        """Return the flattened feature spec: the feature itself when decoding,
        otherwise its raw ``bytes``/``path`` Value columns."""
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value("binary" ),
                "path": Value("string" ),
            }
        )

    def __a ( self , a ):
        """Cast Arrow storage (string / binary / struct / list arrays) to this
        feature's ``{"bytes", "path"}`` struct type."""
        if pa.types.is_string(storage.type ):
            UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
            UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                UpperCamelCase__ = storage.field("bytes" )
            else:
                UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                UpperCamelCase__ = storage.field("path" )
            else:
                UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            # Lists are interpreted as raw image arrays and encoded to bytes.
            UpperCamelCase__ = pa.array(
                [encode_np_array(np.array(a ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(a , self.pa_type )

    def __a ( self , a ):
        """Embed external image files into the storage: read each ``path`` into
        ``bytes`` and keep only the file's basename as ``path``."""
        @no_op_if_value_is_null
        def path_to_bytes(a ):
            with xopen(a , "rb" ) as f:
                UpperCamelCase__ = f.read()
            return bytes_
        UpperCamelCase__ = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        UpperCamelCase__ = pa.array(
            [os.path.basename(a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(a , self.pa_type )
def _UpperCamelCase() -> List[str]:
    """Return (and lazily cache) the image formats Pillow can both open and save.

    Fix: the computed list was assigned to the throwaway name `UpperCamelCase__`
    instead of the module-level cache `_IMAGE_COMPRESSION_FORMATS`, so the cache
    stayed None and the function returned None forever.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def _UpperCamelCase(image) -> bytes:
    """Serialize a PIL image to raw bytes, keeping its original format when
    Pillow supports round-tripping it, else falling back to PNG/TIFF.

    Fix: locals were assigned to `UpperCamelCase__` but read as `buffer`/format,
    and the save call received the raw argument placeholder — NameError before.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        fmt = image.format
    else:
        # PNG for the common paletted/grayscale/RGB(A) modes, TIFF otherwise.
        fmt = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=fmt)
    return buffer.getvalue()
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
if hasattr(__A , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def _UpperCamelCase(array) -> dict:
    """Encode a numpy array as a ``{"path": None, "bytes": ...}`` image dict.

    Finds a Pillow-compatible dtype (exact match when possible, otherwise
    downcasting within the same kind) before converting through PIL.

    Fix: every local was assigned to `UpperCamelCase__` while the reads used the
    original names (`dtype`, `dest_dtype`, ...) — NameError as written; the
    restoration below mirrors the original statement order exactly.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def _UpperCamelCase(objs) -> List[dict]:
    """Encode a batch of heterogeneous image objects into storage dicts.

    Dispatches on the first non-null element: strings are treated as paths,
    numpy arrays / PIL images are serialized to bytes, anything else is
    returned unchanged.

    Fix: the sample element and the dispatch function were assigned to
    `UpperCamelCase__` while read as `obj`/`obj_to_image_dict_func`, and the
    `isinstance` checks compared the placeholder to itself — all restored.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 80 | 1 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# model_type -> image processor class name. Several architectures reuse another
# model's processor (e.g. ConvNext's serves cvt/regnet/resnet/van).
# Fixes: all three constants were assigned to the same throwaway name `a__`
# while being read later as `logger`/`IMAGE_PROCESSOR_MAPPING_NAMES`/
# `IMAGE_PROCESSOR_MAPPING`; the duplicate 'mobilevit' entry is also removed.
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ('align', 'EfficientNetImageProcessor'),
        ('beit', 'BeitImageProcessor'),
        ('bit', 'BitImageProcessor'),
        ('blip', 'BlipImageProcessor'),
        ('blip-2', 'BlipImageProcessor'),
        ('bridgetower', 'BridgeTowerImageProcessor'),
        ('chinese_clip', 'ChineseCLIPImageProcessor'),
        ('clip', 'CLIPImageProcessor'),
        ('clipseg', 'ViTImageProcessor'),
        ('conditional_detr', 'ConditionalDetrImageProcessor'),
        ('convnext', 'ConvNextImageProcessor'),
        ('convnextv2', 'ConvNextImageProcessor'),
        ('cvt', 'ConvNextImageProcessor'),
        ('data2vec-vision', 'BeitImageProcessor'),
        ('deformable_detr', 'DeformableDetrImageProcessor'),
        ('deit', 'DeiTImageProcessor'),
        ('deta', 'DetaImageProcessor'),
        ('detr', 'DetrImageProcessor'),
        ('dinat', 'ViTImageProcessor'),
        ('donut-swin', 'DonutImageProcessor'),
        ('dpt', 'DPTImageProcessor'),
        ('efficientformer', 'EfficientFormerImageProcessor'),
        ('efficientnet', 'EfficientNetImageProcessor'),
        ('flava', 'FlavaImageProcessor'),
        ('focalnet', 'BitImageProcessor'),
        ('git', 'CLIPImageProcessor'),
        ('glpn', 'GLPNImageProcessor'),
        ('groupvit', 'CLIPImageProcessor'),
        ('imagegpt', 'ImageGPTImageProcessor'),
        ('instructblip', 'BlipImageProcessor'),
        ('layoutlmv2', 'LayoutLMv2ImageProcessor'),
        ('layoutlmv3', 'LayoutLMv3ImageProcessor'),
        ('levit', 'LevitImageProcessor'),
        ('mask2former', 'Mask2FormerImageProcessor'),
        ('maskformer', 'MaskFormerImageProcessor'),
        ('mgp-str', 'ViTImageProcessor'),
        ('mobilenet_v1', 'MobileNetV1ImageProcessor'),
        ('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
        ('mobilevitv2', 'MobileViTImageProcessor'),
        ('nat', 'ViTImageProcessor'),
        ('oneformer', 'OneFormerImageProcessor'),
        ('owlvit', 'OwlViTImageProcessor'),
        ('perceiver', 'PerceiverImageProcessor'),
        ('pix2struct', 'Pix2StructImageProcessor'),
        ('poolformer', 'PoolFormerImageProcessor'),
        ('regnet', 'ConvNextImageProcessor'),
        ('resnet', 'ConvNextImageProcessor'),
        ('sam', 'SamImageProcessor'),
        ('segformer', 'SegformerImageProcessor'),
        ('swiftformer', 'ViTImageProcessor'),
        ('swin', 'ViTImageProcessor'),
        ('swin2sr', 'Swin2SRImageProcessor'),
        ('swinv2', 'ViTImageProcessor'),
        ('table-transformer', 'DetrImageProcessor'),
        ('timesformer', 'VideoMAEImageProcessor'),
        ('tvlt', 'TvltImageProcessor'),
        ('upernet', 'SegformerImageProcessor'),
        ('van', 'ConvNextImageProcessor'),
        ('videomae', 'VideoMAEImageProcessor'),
        ('vilt', 'ViltImageProcessor'),
        ('vit', 'ViTImageProcessor'),
        ('vit_hybrid', 'ViTHybridImageProcessor'),
        ('vit_mae', 'ViTImageProcessor'),
        ('vit_msn', 'ViTImageProcessor'),
        ('xclip', 'CLIPImageProcessor'),
        ('yolos', 'YolosImageProcessor'),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _UpperCamelCase(class_name):
    """Resolve an image processor class from its name.

    Looks through the static mapping, then through dynamically registered
    processors, and finally through the main `transformers` module (so missing
    optional-dependency dummies still produce a helpful error).

    Fix: module/class lookups were assigned to `UpperCamelCase__` while read as
    `module_name`/`main_module`, and the getattr targets were placeholders.
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def _UpperCamelCase(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the image processor configuration dict for a model, or ``{}`` when
    no processor config file can be located.

    Fix: the obfuscated signature repeated the parameter name `__A` eight times
    (a SyntaxError); the original keyword parameters are restored.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class lowercase_ :
    """Factory that instantiates the correct image processor from a model
    name/path; never constructed directly.

    NOTE(review): as elsewhere in this file, local assignment targets were
    mangled to ``UpperCamelCase__`` while reads keep the original names
    (``config_dict``, ``image_processor_class`` ...) — restore before use.
    """

    def __init__( self ):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )

    @classmethod
    @replace_list_option_in_docstrings(a )
    def __a ( cls , a , **a ):
        """Resolve and load the image processor for a model: first from the
        processor config, then from a legacy feature-extractor config, then
        from the model config, optionally trusting remote code."""
        UpperCamelCase__ = kwargs.pop("config" , a )
        UpperCamelCase__ = kwargs.pop("trust_remote_code" , a )
        UpperCamelCase__ = True
        UpperCamelCase__ , UpperCamelCase__ = ImageProcessingMixin.get_image_processor_dict(a , **a )
        UpperCamelCase__ = config_dict.get("image_processor_type" , a )
        UpperCamelCase__ = None
        if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
            UpperCamelCase__ = config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            UpperCamelCase__ = config_dict.pop("feature_extractor_type" , a )
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration." )
                UpperCamelCase__ = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
            if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
                UpperCamelCase__ = config_dict["auto_map"]["AutoFeatureExtractor"]
                UpperCamelCase__ = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration." )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(a , a ):
                UpperCamelCase__ = AutoConfig.from_pretrained(a , **a )
            # It could be in `config.image_processor_type``
            UpperCamelCase__ = getattr(a , "image_processor_type" , a )
            if hasattr(a , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
                UpperCamelCase__ = config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            UpperCamelCase__ = image_processor_class_from_name(a )
        UpperCamelCase__ = image_processor_auto_map is not None
        UpperCamelCase__ = image_processor_class is not None or type(a ) in IMAGE_PROCESSOR_MAPPING
        UpperCamelCase__ = resolve_trust_remote_code(
            a , a , a , a )
        if has_remote_code and trust_remote_code:
            # Remote-code path: fetch the class from the Hub repo.
            UpperCamelCase__ = get_class_from_dynamic_module(
                a , a , **a )
            UpperCamelCase__ = kwargs.pop("code_revision" , a )
            if os.path.isdir(a ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(a , **a )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(a , **a )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(a ) in IMAGE_PROCESSOR_MAPPING:
            UpperCamelCase__ = IMAGE_PROCESSOR_MAPPING[type(a )]
            return image_processor_class.from_dict(a , **a )
        raise ValueError(
            f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
            f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )

    @staticmethod
    def __a ( a , a ):
        # Register a new (config class, image processor class) pair.
        IMAGE_PROCESSOR_MAPPING.register(a , a )
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
)
def _UpperCamelCase() -> None:
    """Demo: run minimax on a sample score list and print the optimal value.

    Fix: the call previously passed undefined `__A` placeholders instead of
    the player flag, the scores and the tree height.
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    # NOTE(review): `minimax`/`main` are the intended names of the functions in
    # this file (both defs were renamed `_UpperCamelCase`); confirm before running.
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 80 | 1 |
'''simple docstring'''
import string
from math import logaa
def _UpperCamelCase ( __A , __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = document.translate(
str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
UpperCamelCase__ = document_without_punctuation.split(" " ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _UpperCamelCase ( __A , __A ) -> tuple[int, int]:
'''simple docstring'''
UpperCamelCase__ = corpus.lower().translate(
str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
UpperCamelCase__ = corpus_without_punctuation.split("\n" )
UpperCamelCase__ = term.lower()
return (len([doc for doc in docs if term in doc] ), len(__A ))
def _UpperCamelCase ( __A , __A , __A=False ) -> float:
'''simple docstring'''
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
return round(logaa(n / df ) , 3 )
def _UpperCamelCase ( __A , __A ) -> float:
'''simple docstring'''
return round(tf * idf , 3 )
| 80 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _UpperCamelCase ( __A = 100 ) -> int:
'''simple docstring'''
UpperCamelCase__ = 1
UpperCamelCase__ = 2
for i in range(2 , max_n + 1 ):
UpperCamelCase__ = pre_numerator
UpperCamelCase__ = 2 * i // 3 if i % 3 == 0 else 1
UpperCamelCase__ = cur_numerator
UpperCamelCase__ = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
    # Fix: `solution` is never defined in this file — the solver above is
    # named `_UpperCamelCase`.
    print(f"{_UpperCamelCase() = }")
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
if resistor <= 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__A )
first_sum += 1 / float(__A )
index += 1
return 1 / first_sum
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative value!'''
raise ValueError(__A )
index += 1
return sum_r
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest

    doctest.testmod()
| 80 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> List[str]:
    """Convert an original mLUKE checkpoint into a transformers ``LukeForMaskedLM``.

    Expected arguments (per the argparse block below): checkpoint_path,
    metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size.

    NOTE(review): the obfuscation collapsed all five parameters to the
    duplicate name ``__A`` (a SyntaxError) and renamed every local assignment
    target to ``UpperCamelCase__`` while the reads keep the original names
    (``metadata``, ``config``, ``state_dict``, ``tokenizer`` ...); restore the
    original names before running.
    """
    with open(__A ) as metadata_file:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__A , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    UpperCamelCase__ = torch.load(__A , map_location="cpu" )["module"]
    # Load the entity vocab file
    UpperCamelCase__ = load_original_entity_vocab(__A )
    # add an entry for [MASK2]
    UpperCamelCase__ = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    UpperCamelCase__ = AddedToken("<ent>" , lstrip=__A , rstrip=__A )
    UpperCamelCase__ = AddedToken("<ent2>" , lstrip=__A , rstrip=__A )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(__A )
    with open(os.path.join(__A , "tokenizer_config.json" ) , "r" ) as f:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = "MLukeTokenizer"
    with open(os.path.join(__A , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(__A , __A )
    with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(__A , __A )
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    # Initialize the embeddings of the special tokens
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
    UpperCamelCase__ = state_dict["embeddings.word_embeddings.weight"]
    UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
    UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        UpperCamelCase__ = state_dict[bias_name]
        UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
        UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
        UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            UpperCamelCase__ = F'''encoder.layer.{layer_index}.attention.self.'''
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    UpperCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
    UpperCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    UpperCamelCase__ = state_dict["entity_predictions.bias"]
    UpperCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
    UpperCamelCase__ = LukeForMaskedLM(config=__A ).eval()
    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    UpperCamelCase__ = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            UpperCamelCase__ = state_dict[key]
        else:
            UpperCamelCase__ = state_dict[key]
    UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__A , strict=__A )
    if set(__A ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(__A ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A , task="entity_classification" )
    UpperCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    UpperCamelCase__ = (0, 9)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        UpperCamelCase__ = torch.Size((1, 33, 768) )
        UpperCamelCase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        UpperCamelCase__ = torch.Size((1, 1, 768) )
        UpperCamelCase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    UpperCamelCase__ = "Tokyo is the capital of <mask>."
    UpperCamelCase__ = (24, 30)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    UpperCamelCase__ = encoding["input_ids"][0].tolist()
    UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(__A )
    UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
    UpperCamelCase__ = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(__A ) )
    model.save_pretrained(__A )
def _UpperCamelCase ( __A ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = ["[MASK]", "[PAD]", "[UNK]"]
UpperCamelCase__ = [json.loads(__A ) for line in open(__A )]
UpperCamelCase__ = {}
for entry in data:
UpperCamelCase__ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase__ = entity_id
break
UpperCamelCase__ = F'''{language}:{entity_name}'''
UpperCamelCase__ = entity_id
return new_mapping
if __name__ == "__main__":
    # CLI entry point: convert an original (m)LUKE checkpoint into transformers format.
    # Fix: the parser/namespace were assigned to the throwaway name `a__` while
    # being read back as `parser`/`args`, which raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 80 | 1 |
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


if __name__ == "__main__":
    # Count per-token-id occurrences over a binarized MLM dataset so masking
    # probabilities can be smoothed (cf. XLM / word2vec).
    # BUG FIX: every result below was bound to the obfuscated name ``a__``
    # while the code reads ``logger``/``parser``/``args``/``data``/``counter``/
    # ``counts`` — each such line raised NameError.  Read-site names restored.
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Dense per-id counts; ids absent from the corpus stay 0.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 80 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
a__ : Any = logging.get_logger(__name__)

# Map from checkpoint name to the URL of its hosted config.json.
# NOTE(review): the obfuscated name ``a__`` is rebound here, clobbering the
# logger above; downstream code expecting a named archive map will not find it.
a__ : str = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}
class lowercase_ ( PretrainedConfig ):
    """Configuration class for a LiLT (Language-Independent Layout Transformer)
    model.  All defaults correspond to the base-size checkpoint.
    """

    # BUG FIX: the base class must be ``PretrainedConfig`` (imported above) —
    # the obfuscated source subclassed the ``a__`` dict, which is not a class.
    # The identifier attribute is named ``model_type`` per the transformers
    # configuration convention.
    model_type = 'lilt'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_ad_position_embeddings=1024,
        **kwargs,
    ):
        # BUG FIX: every parameter was named ``a`` in the obfuscated source
        # (duplicate parameter names are a SyntaxError); the names are restored
        # from the attribute assignments below, which still read them.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( string_a: str , string_b: str ) -> int:
    """Return the Hamming distance between two equal-length strings.

    The Hamming distance is the number of positions at which the
    corresponding characters differ.

    Raises:
        ValueError: if the two strings have different lengths.
    """
    # BUG FIX: the obfuscated source declared both parameters — and both zip
    # targets — with the same name, which is invalid Python.
    if len(string_a ) != len(string_b ):
        raise ValueError("String lengths must match!" )
    # bool is an int subclass, so summing the mismatch flags counts them.
    return sum(char_a != char_b for char_a, char_b in zip(string_a , string_b ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 80 |
'''simple docstring'''
a__ : Union[str, Any] = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a__ : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a__ : Optional[Any] = True
a__ : Optional[Any] = False
def _UpperCamelCase ( __A ) -> bool:
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
UpperCamelCase__ = chain(next_number(__A ) )
UpperCamelCase__ = number_chain
while number < 10000000:
UpperCamelCase__ = number_chain
number *= 10
return number_chain
def _UpperCamelCase ( __A = 10000000 ) -> int:
'''simple docstring'''
for i in range(1 , __A ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 80 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
    """Unit tests for AudioDiffusionPipeline built from tiny randomly
    initialised dummy components (UNet, conditional UNet, VQ-VAE).

    NOTE(review): the obfuscation collapsed every method name to ``__a`` and
    every local to ``UpperCamelCase__``; several lines read names that are
    never bound in scope (``model``, ``vqvae``, ``unet``, ``a``, ``mel``,
    ``pipe``, ...).  Code kept byte-identical here.
    """

    def __a ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def __a ( self ):
        # Dummy unconditional UNet for a 32x64 single-channel spectrogram.
        torch.manual_seed(0 )
        UpperCamelCase__ = UNetaDModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return model

    @property
    def __a ( self ):
        # Dummy cross-attention-conditioned UNet (encoding dim 10).
        torch.manual_seed(0 )
        UpperCamelCase__ = UNetaDConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
        return model

    @property
    def __a ( self ):
        # Dummy (VAE, UNet) pair for the latent-diffusion variant.
        torch.manual_seed(0 )
        UpperCamelCase__ = AutoencoderKL(
            sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
        UpperCamelCase__ = UNetaDModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
        return vqvae, unet

    @slow
    def __a ( self ):
        # End-to-end checks: unconditional DDPM generation, tuple vs dict
        # return parity, latent (VQ-VAE + DDIM) generation from raw audio,
        # and encoding-conditioned generation; outputs are compared against
        # hard-coded pixel slices.
        UpperCamelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
        UpperCamelCase__ = Mel(
            x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
        UpperCamelCase__ = DDPMScheduler()
        UpperCamelCase__ = AudioDiffusionPipeline(vqvae=a , unet=self.dummy_unet , mel=a , scheduler=a )
        UpperCamelCase__ = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 )
        UpperCamelCase__ = pipe(generator=a , steps=4 )
        UpperCamelCase__ = output.audios[0]
        UpperCamelCase__ = output.images[0]
        UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 )
        UpperCamelCase__ = pipe(generator=a , steps=4 , return_dict=a )
        UpperCamelCase__ = output[0][0]
        # Same seed with/without return_dict must produce identical images.
        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        UpperCamelCase__ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
        UpperCamelCase__ = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
        # Latent variant: start from raw audio part-way through the schedule.
        UpperCamelCase__ = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
        UpperCamelCase__ = DDIMScheduler()
        UpperCamelCase__ = self.dummy_vqvae_and_unet
        UpperCamelCase__ = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=a , scheduler=a )
        UpperCamelCase__ = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        np.random.seed(0 )
        UpperCamelCase__ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
        UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 )
        UpperCamelCase__ = pipe(raw_audio=a , generator=a , start_step=5 , steps=10 )
        UpperCamelCase__ = output.images[0]
        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        UpperCamelCase__ = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
        # Conditioned variant: generation driven by a random encoding tensor.
        UpperCamelCase__ = self.dummy_unet_condition
        UpperCamelCase__ = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0] , unet=a , mel=a , scheduler=a )
        UpperCamelCase__ = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        np.random.seed(0 )
        UpperCamelCase__ = torch.rand((1, 1, 10) )
        UpperCamelCase__ = pipe(generator=a , encoding=a )
        UpperCamelCase__ = output.images[0]
        UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        UpperCamelCase__ = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    """Integration test against the hosted ``teticio/audio-diffusion-ddim-256``
    checkpoint; requires a CUDA GPU and network access.

    NOTE(review): like the class above, method names/locals are obfuscated and
    ``a`` is read without ever being bound; kept byte-identical.
    """

    def __a ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __a ( self ):
        # Generate once with a fixed seed and compare audio/image shapes plus
        # a hard-coded 10-pixel slice of the output image.
        UpperCamelCase__ = torch_device
        UpperCamelCase__ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
        UpperCamelCase__ = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        UpperCamelCase__ = torch.Generator(device=a ).manual_seed(42 )
        UpperCamelCase__ = pipe(generator=a )
        UpperCamelCase__ = output.audios[0]
        UpperCamelCase__ = output.images[0]
        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        UpperCamelCase__ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
        UpperCamelCase__ = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 80 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs
) -> None:
    """Compute per-example token lengths for the train and val splits of a
    ``SeqaSeqDataset`` and pickle them to each dataset's ``len_file``.

    Args:
        tokenizer_name: model id/path passed to ``AutoTokenizer.from_pretrained``.
        data_dir: dataset directory handed to ``SeqaSeqDataset``.
        max_source_length / max_target_length: truncation lengths.
        consider_target: if True, store max(source_len, target_len) per example
            instead of the source length alone.
        **kwargs: forwarded to ``SeqaSeqDataset``.
    """
    # BUG FIX: every parameter was named ``__A`` (duplicate parameter names are
    # a SyntaxError) and the function name did not match the ``fire.Fire``
    # entry point below; names are restored from the read sites in the body.
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        # Lengths are counts of non-padding tokens per sequence, gathered in
        # large batches for speed.
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )


if __name__ == "__main__":
    fire.Fire(save_len_file)
| 80 | 1 |
'''simple docstring'''
def binary_insertion_sort(collection: list ) -> list:
    """Sort ``collection`` in place with binary insertion sort and return it.

    For each element, binary-search its insertion index among the already
    sorted prefix, shift the tail right, and drop it in.

    Args:
        collection: mutable list of mutually comparable items.

    Returns:
        The same list, sorted in ascending order.
    """
    # BUG FIX: the body read ``collection``/``val``/``low``/``high``/``mid``
    # and the __main__ guard read ``binary_insertion_sort``/``user_input``/
    # ``unsorted`` while all bindings used obfuscated names — every line
    # raised NameError.  Read-site names restored.
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the insertion index for ``val`` in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open the slot at ``low``.
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
| 80 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
# BUG FIX: the class below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / PRETRAINED_INIT_CONFIGURATION, but
# every constant here was bound to the obfuscated name ``a__`` — the read-site
# names are restored.
logger = logging.get_logger(__name__)

# File names that store the vocabulary / serialized fast tokenizer.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

# Hosted vocab file for every published RoFormer checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_char_small': (
            'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_chinese_char_base': (
            'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_discriminator': (
            'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_generator': (
            'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
        ),
    }
}

# Maximum input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'junnyu/roformer_chinese_small': 1536,
    'junnyu/roformer_chinese_base': 1536,
    'junnyu/roformer_chinese_char_small': 512,
    'junnyu/roformer_chinese_char_base': 512,
    'junnyu/roformer_small_discriminator': 128,
    'junnyu/roformer_small_generator': 128,
}

# Per-checkpoint defaults forwarded to the tokenizer constructor.
PRETRAINED_INIT_CONFIGURATION = {
    'junnyu/roformer_chinese_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_base': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
    'junnyu/roformer_small_discriminator': {'do_lower_case': True},
    'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowercase_ ( a__ ):
    """Fast (Rust-backed) RoFormer tokenizer with a custom Jieba pre-tokenizer.

    NOTE(review): every class attribute below is rebound to the single
    obfuscated name ``__UpperCAmelCase`` (only the last binding survives), the
    ``__init__``/``__a`` signatures declare the parameter ``a`` repeatedly
    (duplicate parameter names are a SyntaxError), and the bodies read names
    (``pre_tok_state``, ``do_lower_case``, ``state``, ``token_ids_a``, ...)
    that are never bound.  Code kept byte-identical; names must be restored
    from the upstream source before use.
    """

    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
    __UpperCAmelCase = RoFormerTokenizer

    def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ):
        super().__init__(
            a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
        # Rebuild the normalizer when the stored lowercase/strip_accents flags
        # disagree with the requested ones.
        UpperCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("lowercase" , a ) != do_lower_case
            or pre_tok_state.get("strip_accents" , a ) != strip_accents
        ):
            UpperCamelCase__ = getattr(a , pre_tok_state.pop("type" ) )
            UpperCamelCase__ = do_lower_case
            UpperCamelCase__ = strip_accents
            UpperCamelCase__ = pre_tok_class(**a )
        UpperCamelCase__ = do_lower_case

    def __getstate__( self ):
        # Swap the (unpicklable) custom Jieba pre-tokenizer for a plain
        # BertPreTokenizer while pickling.
        UpperCamelCase__ = self.__dict__.copy()
        UpperCamelCase__ = BertPreTokenizer()
        return state

    def __setstate__( self , a ):
        # Restore state and re-attach the custom Jieba pre-tokenizer.
        UpperCamelCase__ = d
        UpperCamelCase__ = self.__dict__["_tokenizer"].get_vocab()
        UpperCamelCase__ = PreTokenizer.custom(JiebaPreTokenizer(a ) )

    def __a ( self , a , a=None ):
        # build_inputs_with_special_tokens: [CLS] A [SEP] (B [SEP]).
        UpperCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __a ( self , a , a = None ):
        # create_token_type_ids_from_sequences: 0s for segment A, 1s for B.
        UpperCamelCase__ = [self.sep_token_id]
        UpperCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __a ( self , a , a = None ):
        # save_vocabulary via the backend model.
        UpperCamelCase__ = self._tokenizer.model.save(a , name=a )
        return tuple(a )

    def __a ( self , a , a=None , a=None , a=False , **a , ):
        # save_pretrained: temporarily use a picklable pre-tokenizer.
        UpperCamelCase__ = BertPreTokenizer()
        return super().save_pretrained(a , a , a , a , **a )
| 80 | 1 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
# BUG FIX: the helper functions below read _IMAGE_COMPRESSION_FORMATS,
# _NATIVE_BYTEORDER and _VALID_IMAGE_ARRAY_DTPYES, but each constant was bound
# to the obfuscated name ``a__`` — the read-site names are restored.

# Cache filled lazily on first use (formats Pillow can both open and save).
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None

# Byte-order prefix of the host platform, used to normalize numpy dtypes.
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'

# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype('|b1'),
    np.dtype('|u1'),
    np.dtype('<u2'),
    np.dtype('>u2'),
    np.dtype('<i2'),
    np.dtype('>i2'),
    np.dtype('<u4'),
    np.dtype('>u4'),
    np.dtype('<i4'),
    np.dtype('>i4'),
    np.dtype('<f4'),
    np.dtype('>f4'),
    np.dtype('<f8'),
    np.dtype('>f8'),
]
@dataclass
class lowercase_ :
    """``Image`` feature for `datasets`: encodes PIL images / numpy arrays /
    paths into an Arrow struct ``{"bytes": binary, "path": string}`` and
    decodes stored examples back into ``PIL.Image.Image`` objects.

    NOTE(review): method names were collapsed to ``__a``, locals to
    ``UpperCamelCase__`` (the real names still appear at the read sites, e.g.
    ``value``/``bytes_``/``path``/``storage``), and the second method declares
    the parameter ``a`` twice — a SyntaxError.  Code kept byte-identical.
    """

    __UpperCAmelCase = True
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "PIL.Image.Image"
    __UpperCAmelCase = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    __UpperCAmelCase = field(default='Image' , init=a__ , repr=a__ )

    def __call__( self ):
        # The Arrow storage type of this feature.
        return self.pa_type

    def __a ( self , a ):
        # encode_example: accept str path, raw bytes, numpy array, PIL image,
        # or an already-encoded {"bytes", "path"} dict.
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        if isinstance(a , a ):
            UpperCamelCase__ = np.array(a )
        if isinstance(a , a ):
            return {"path": value, "bytes": None}
        elif isinstance(a , a ):
            return {"path": None, "bytes": value}
        elif isinstance(a , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(a )
        elif isinstance(a , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(a )
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )

    def __a ( self , a , a=None ):
        # decode_example: turn a stored {"bytes", "path"} struct back into a
        # PIL image; token_per_repo_id enables authenticated Hub streaming.
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'." )
        if token_per_repo_id is None:
            UpperCamelCase__ = {}
        UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(a ):
                    UpperCamelCase__ = PIL.Image.open(a )
                else:
                    UpperCamelCase__ = path.split("::" )[-1]
                    try:
                        UpperCamelCase__ = string_to_dict(a , config.HUB_DATASETS_URL )["repo_id"]
                        UpperCamelCase__ = token_per_repo_id.get(a )
                    except ValueError:
                        UpperCamelCase__ = None
                    with xopen(a , "rb" , use_auth_token=a ) as f:
                        UpperCamelCase__ = BytesIO(f.read() )
                    UpperCamelCase__ = PIL.Image.open(bytes_ )
        else:
            UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
        image.load() # to avoid "Too many open files" errors
        return image

    def __a ( self ):
        # flatten: non-decodable view of this feature as plain Value columns.
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary" ),
                "path": Value("string" ),
            }
        )

    def __a ( self , a ):
        # cast_storage: normalize string / binary / struct / list storage into
        # the canonical {"bytes", "path"} StructArray.
        if pa.types.is_string(storage.type ):
            UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
            UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                UpperCamelCase__ = storage.field("bytes" )
            else:
                UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                UpperCamelCase__ = storage.field("path" )
            else:
                UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            UpperCamelCase__ = pa.array(
                [encode_np_array(np.array(a ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(a , self.pa_type )

    def __a ( self , a ):
        # embed_storage: read referenced local files so the table becomes
        # self-contained (bytes embedded, path reduced to its basename).
        @no_op_if_value_is_null
        def path_to_bytes(a ):
            with xopen(a , "rb" ) as f:
                UpperCamelCase__ = f.read()
            return bytes_

        UpperCamelCase__ = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        UpperCamelCase__ = pa.array(
            [os.path.basename(a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(a , self.pa_type )
def list_image_compression_formats() -> List[str]:
    """Return (and cache) the image formats Pillow can both open and save.

    Raises:
        ImportError: if Pillow is not installed.
    """
    # BUG FIX: this function is called by name at ``image_to_bytes`` below and
    # the cache global was bound to ``a__`` — both names are restored.
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        # Formats usable for round-tripping: openable AND saveable.
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image ) -> bytes:
    """Serialize a PIL image to compressed bytes.

    The image's native format is kept when Pillow can round-trip it;
    otherwise PNG is used for common modes and TIFF for everything else.
    """
    # BUG FIX: the body read ``image``/``buffer`` while both were bound to
    # obfuscated names; this function is also called as ``image_to_bytes``
    # by the encoders below.
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        fmt = image.format
    else:
        fmt = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer , format=fmt )
    return buffer.getvalue()
def encode_pil_image(image ) -> dict:
    """Encode a PIL image as a ``{"path", "bytes"}`` dict.

    When the image still knows its on-disk filename, only the path is stored
    (the bytes can be re-read later); otherwise the compressed bytes are kept.
    """
    # BUG FIX: this helper is called as ``encode_pil_image`` by the Image
    # feature above, and its body read ``image`` — names restored.
    if hasattr(image , "filename" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array ) -> dict:
    """Encode a numpy array as compressed image bytes via Pillow.

    The dtype is downcast, if needed, to one Pillow preserves exactly
    (multi-channel arrays must be uint8-compatible).

    Raises:
        ImportError: if Pillow is not installed.
        TypeError: if no valid image dtype can represent ``array``.
    """
    # BUG FIX: this helper is called as ``encode_np_array`` by the Image
    # feature above; its locals were collapsed to one obfuscated name while
    # the code read ``dtype``/``dest_dtype``/``image`` etc. — names restored.
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(objs ) -> List[dict]:
    """Encode a homogeneous list of image-like objects (paths, numpy arrays,
    or PIL images) into ``{"path", "bytes"}`` dicts, preserving Nones.

    The type of the first non-null element decides the encoding for the whole
    list; unrecognized element types are returned unchanged.
    """
    # Renamed from the obfuscated ``_UpperCamelCase`` for consistency with the
    # sibling encoders; locals restored from the read sites (``obj``,
    # ``obj_to_image_dict_func``).
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
| 80 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
# BUG FIX: the tokenizer class below reads VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, but the
# constants were bound to the obfuscated name ``a__`` — read-site names restored.
logger = logging.get_logger(__name__)

# File name that stores the ESM vocabulary.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

# Hosted vocab file for each published ESM-2 checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}

# Maximum input length (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1024,
    'facebook/esm2_t12_35M_UR50D': 1024,
}
def load_vocab_file(vocab_file ) -> List[str]:
    """Read a vocabulary file and return its lines, stripped of whitespace.

    Args:
        vocab_file: path to a text file with one token per line.
    """
    # BUG FIX: the tokenizer class below calls this as ``load_vocab_file``,
    # and the body read ``lines`` which was never bound; the return annotation
    # is corrected from ``str`` to ``List[str]``.
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class lowercase_ ( a__ ):
    """Tokenizer for ESM-2 protein language models: simple whitespace
    tokenization over a fixed vocabulary file.

    NOTE(review): every method name was collapsed to ``__a`` (later
    definitions shadow earlier ones) and several signatures re-declare the
    parameter ``a`` — duplicate parameter names are a SyntaxError.  The
    intended names follow the ``PreTrainedTokenizer`` API
    (``_convert_id_to_token``, ``get_vocab``, ``save_vocabulary``, ...).
    Code kept byte-identical.
    """

    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase = ['input_ids', 'attention_mask']

    def __init__( self , a , a="<unk>" , a="<cls>" , a="<pad>" , a="<mask>" , a="<eos>" , **a , ):
        super().__init__(**a )
        # Build the id<->token lookup tables from the vocabulary file.
        UpperCamelCase__ = load_vocab_file(a )
        UpperCamelCase__ = dict(enumerate(self.all_tokens ) )
        UpperCamelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        UpperCamelCase__ = unk_token
        UpperCamelCase__ = cls_token
        UpperCamelCase__ = pad_token
        UpperCamelCase__ = mask_token
        UpperCamelCase__ = eos_token
        UpperCamelCase__ = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def __a ( self , a ):
        # id -> token, falling back to the unk token.
        return self._id_to_token.get(a , self.unk_token )

    def __a ( self , a ):
        # token -> id, falling back to the unk id.
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )

    def __a ( self , a , **a ):
        # Tokenize by plain whitespace split.
        return text.split()

    def __a ( self , a=False ):
        # Size of the base vocabulary.
        return len(self._id_to_token )

    def __a ( self ):
        # Full token -> index mapping.
        return {token: i for i, token in enumerate(self.all_tokens )}

    def __a ( self , a ):
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )

    def __a ( self , a ):
        return self._id_to_token.get(a , self.unk_token )

    def __a ( self , a , a = None ):
        # Add special tokens: <cls> A <eos> (<eos> doubles as the separator).
        UpperCamelCase__ = [self.cls_token_id]
        UpperCamelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token

    def __a ( self , a , a = None , a = False ):
        # Special-tokens mask: 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        UpperCamelCase__ = [1] + ([0] * len(a )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(a ) + [1]
        return mask

    def __a ( self , a , a ):
        # Write the vocabulary to <save_directory>/(<prefix>-)vocab.txt.
        UpperCamelCase__ = os.path.join(a , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(a , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def __a ( self ):
        # vocab_size property.
        return self.get_vocab_size(with_added_tokens=a )

    def __a ( self , a , a = False ):
        # _add_tokens passthrough to the base implementation.
        return super()._add_tokens(a , special_tokens=a )
| 80 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
# BUG FIX: the second alias reads ``Prediction`` which was bound to the
# obfuscated name ``a__``; logger/alias names restored.
logger = logging.get_logger(__name__)

# A single detection result, e.g. {"score": float, "label": str, "box": dict}.
Prediction = Dict[str, Any]
# The pipeline returns one list of predictions per image.
Predictions = List[Prediction]
@add_end_docstrings(a__ )
class lowercase_ ( a__ ):
    """Object-detection pipeline: takes an image and returns scored, labeled
    bounding boxes.  Also supports LayoutLM-style token-classification models
    when a tokenizer is present (OCR boxes + word labels).

    NOTE(review): method names were collapsed to ``__a``, one signature
    declares ``a`` twice (a SyntaxError), and locals were renamed to
    ``UpperCamelCase__`` while the code reads the originals
    (``preprocess_params``/``inputs``/``model_outputs`` ...).  Kept
    byte-identical.
    """

    def __init__( self , *a , **a ):
        super().__init__(*a , **a )
        if self.framework == "tf":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        requires_backends(self , "vision" )
        # Accept either plain object-detection or token-classification heads.
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )

    def __a ( self , **a ):
        # _sanitize_parameters: only ``threshold`` is a postprocess kwarg.
        UpperCamelCase__ = {}
        if "threshold" in kwargs:
            UpperCamelCase__ = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__( self , *a , **a ):
        return super().__call__(*a , **a )

    def __a ( self , a ):
        # preprocess: load the image, record its target size, run the image
        # processor (and tokenizer for the LayoutLM variant).
        UpperCamelCase__ = load_image(a )
        UpperCamelCase__ = torch.IntTensor([[image.height, image.width]] )
        UpperCamelCase__ = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            UpperCamelCase__ = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        UpperCamelCase__ = target_size
        return inputs

    def __a ( self , a ):
        # _forward: run the model and carry target_size (and bbox) through.
        UpperCamelCase__ = model_inputs.pop("target_size" )
        UpperCamelCase__ = self.model(**a )
        UpperCamelCase__ = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            UpperCamelCase__ = model_inputs["bbox"]
        return model_outputs

    def __a ( self , a , a=0.9 ):
        # postprocess: convert logits into [{"score", "label", "box"}, ...].
        UpperCamelCase__ = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            UpperCamelCase__ , UpperCamelCase__ = target_size[0].tolist()

            def unnormalize(a ):
                # OCR boxes are normalized to 0-1000; scale back to pixels.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 10_00),
                            (height * bbox[1] / 10_00),
                            (width * bbox[2] / 10_00),
                            (height * bbox[3] / 10_00),
                        ] ) )

            UpperCamelCase__ , UpperCamelCase__ = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            UpperCamelCase__ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            UpperCamelCase__ = [unnormalize(a ) for bbox in model_outputs["bbox"].squeeze(0 )]
            UpperCamelCase__ = ["score", "label", "box"]
            UpperCamelCase__ = [dict(zip(a , a ) ) for vals in zip(scores.tolist() , a , a ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            UpperCamelCase__ = self.image_processor.post_process_object_detection(a , a , a )
            UpperCamelCase__ = raw_annotations[0]
            UpperCamelCase__ = raw_annotation["scores"]
            UpperCamelCase__ = raw_annotation["labels"]
            UpperCamelCase__ = raw_annotation["boxes"]
            UpperCamelCase__ = scores.tolist()
            UpperCamelCase__ = [self.model.config.idalabel[label.item()] for label in labels]
            UpperCamelCase__ = [self._get_bounding_box(a ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            UpperCamelCase__ = ["score", "label", "box"]
            UpperCamelCase__ = [
                dict(zip(a , a ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]
        return annotation

    def __a ( self , a ):
        # _get_bounding_box: tensor [xmin, ymin, xmax, ymax] -> dict of ints.
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = box.int().tolist()
        UpperCamelCase__ = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 80 |
'''simple docstring'''
from math import factorial, pi
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__A ) )
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__A ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `maclaurin_sin` / `maclaurin_cos` are undefined here — the
    # renaming pass collapsed both functions above to `_UpperCamelCase`, so this
    # demo would raise NameError; confirm against the original module.
    print(maclaurin_sin(1_0))
    print(maclaurin_sin(-1_0))
    print(maclaurin_sin(1_0, 1_5))
    print(maclaurin_sin(-1_0, 1_5))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(1_0, 1_5))
    print(maclaurin_cos(-1_0, 1_5))
| 80 | 1 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def _UpperCamelCase ( __A = 1500000 ) -> int:
'''simple docstring'''
UpperCamelCase__ = defaultdict(__A )
UpperCamelCase__ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __A , 2 ):
if gcd(__A , __A ) > 1:
continue
UpperCamelCase__ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__A , limit + 1 , __A ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined — the function above was renamed
    # `_UpperCamelCase` by the obfuscation pass; confirm before running.
    print(F"""{solution() = }""")
| 80 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    # Imported only for static type checking (never executed at runtime).
    # The renaming pass digit-mangled `sqlite3` into `sqlitea`; restored.
    import sqlite3
    import sqlalchemy
class lowercase_(a__):
    """Reader that materializes the result of a SQL query/table as a `Dataset`.

    NOTE(review): the obfuscated original declared `__init__` with several
    parameters all named `a` (a SyntaxError) and dropped the `self.builder`
    assignment target; names restored from the `Sql` builder call and the
    `self.builder` / `self.keep_in_memory` references in the body.
    """

    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        # The Sql builder does the actual fetching/conversion to Arrow.
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def __a(self):
        """Download/prepare the builder and return the single "train" split."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class lowercase_:
    """Writes a `Dataset` into a SQL table via pandas, optionally with a pool.

    NOTE(review): the obfuscated original declared `__init__` with duplicate
    `a` parameters (a SyntaxError), dropped the `self.*` assignment targets,
    and renamed all three methods to `__a` even though the body calls
    `self._batch_sql` and `self._write`; those internal names are restored so
    the calls resolve.  The public entry point keeps its obfuscated name `__a`.
    """

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATch_SIZE if False else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def __a(self):
        """Entry point: strip reserved keys from the kwargs and dispatch the write."""
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one slice of the dataset with `DataFrame.to_sql`; returns row count."""
        offset, index, to_sql_kwargs = args
        # Only the first batch may create/replace the table; later ones append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        # `to_sql` may return None depending on the backend; fall back to len(df).
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        """Write the whole dataset batch by batch, serially or with a process pool."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
| 80 | 1 |
'''simple docstring'''
from math import ceil
def _UpperCamelCase ( __A , __A ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = list(range(0 , __A ) )
UpperCamelCase__ = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
UpperCamelCase__ = []
for i in device_map_blocks:
if device_map_blocks.count(__A ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(__A )
# Missing blocks
UpperCamelCase__ = [i for i in blocks if i not in device_map_blocks]
UpperCamelCase__ = [i for i in device_map_blocks if i not in blocks]
if len(__A ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(__A ) )
if len(__A ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(__A ) )
if len(__A ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(__A ) )
def _UpperCamelCase ( __A , __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = list(range(__A ) )
UpperCamelCase__ = int(ceil(n_layers / len(__A ) ) )
UpperCamelCase__ = [layers[i : i + n_blocks] for i in range(0 , __A , __A )]
return dict(zip(__A , __A ) )
| 80 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Menu order matters: the dynamo-backend converter below indexes into this
# list with the user's numeric menu choice.
a__ : Any = [
    'EAGER',
    'AOT_EAGER',
    'INDUCTOR',
    'NVFUSER',
    'AOT_NVFUSER',
    'AOT_CUDAGRAPHS',
    'OFI',
    'FX2TRT',
    'ONNXRT',
    'IPEX',
]
def _UpperCamelCase(input_text, convert_value=None, default=None, error_message=None):
    """Prompt on stdin until `convert_value` accepts the answer.

    An empty answer returns `default` (when one is given); a conversion failure
    prints `error_message` and re-prompts.

    NOTE(review): the obfuscated original declared all parameters as `__A`
    (a SyntaxError); names restored from the body references.
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            # Conversion failed: optionally explain, then loop and re-prompt.
            if error_message is not None:
                print(error_message)
def _UpperCamelCase(input_text, options=[], convert_value=None, default_choice=0):
    """Show a bullet menu of `options` and return the (optionally converted) choice.

    NOTE(review): the obfuscated original declared all parameters as `__A`
    (a SyntaxError); names restored.  The mutable `[]` default is kept for
    interface compatibility — the list is never mutated here.
    """
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _UpperCamelCase(value):
    """Map a numeric menu choice to a `ComputeEnvironment` member.

    NOTE(review): the obfuscated original's parameter was `__A` while the body
    referenced `value` (a NameError); name restored.
    """
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _UpperCamelCase(value):
    """Map a numeric menu choice to a `DistributedType` member.

    NOTE(review): parameter renamed from `__A` to `value` to match the body.
    """
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _UpperCamelCase(value):
    """Map a numeric menu choice to a `DynamoBackend` value string.

    Indexes into the module-level DYNAMO_BACKENDS menu list.
    NOTE(review): parameter renamed from `__A` to `value` to match the body.
    """
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _UpperCamelCase(value):
    """Map a numeric menu choice to a `PrecisionType` member.

    NOTE(review): parameter renamed from `__A` to `value` to match the body.
    """
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _UpperCamelCase(value):
    """Map a numeric menu choice to a `SageMakerDistributedType` member.

    NOTE(review): parameter renamed from `__A` to `value` to match the body.
    """
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _UpperCamelCase ( __A ) -> Dict:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class lowercase_(argparse.RawDescriptionHelpFormatter):
    """Help formatter that hides the generic `<command> [<args>]` stub from usage lines.

    NOTE(review): the obfuscated original declared the method with four
    parameters all named `a` (a SyntaxError) and renamed the override to `__a`;
    argparse only ever calls the `_format_usage` hook, so both the signature and
    the method name are restored to argparse's contract.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _UpperCamelCase ( __A = 100 ) -> int:
'''simple docstring'''
UpperCamelCase__ = 1
UpperCamelCase__ = 2
for i in range(2 , max_n + 1 ):
UpperCamelCase__ = pre_numerator
UpperCamelCase__ = 2 * i // 3 if i % 3 == 0 else 1
UpperCamelCase__ = cur_numerator
UpperCamelCase__ = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
    # NOTE(review): `solution` is undefined — the function above was renamed
    # `_UpperCamelCase` by the obfuscation pass; confirm before running.
    print(F"""{solution() = }""")
| 80 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
if resistor <= 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__A )
first_sum += 1 / float(__A )
index += 1
return 1 / first_sum
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative value!'''
raise ValueError(__A )
index += 1
return sum_r
if __name__ == "__main__":
    # Run the module doctests (the obfuscation stripped the docstrings, so
    # there is nothing left to execute, but the hook is kept).
    import doctest
    doctest.testmod()
| 80 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the ConvBERT package: maps submodule name to the
# public symbols it provides.
# NOTE(review): upstream this is a single `_import_structure` dict that each
# successful `else:` branch below *adds keys to*; the renaming pass collapsed
# every assignment onto `a__`, so each rebinding discards the previous
# structure — confirm against the original module.
a__ : List[str] = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}
# Fast tokenizer is only exported when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : Optional[int] = ['ConvBertTokenizerFast']
# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : Optional[int] = [
        'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvBertForMaskedLM',
        'ConvBertForMultipleChoice',
        'ConvBertForQuestionAnswering',
        'ConvBertForSequenceClassification',
        'ConvBertForTokenClassification',
        'ConvBertLayer',
        'ConvBertModel',
        'ConvBertPreTrainedModel',
        'load_tf_weights_in_convbert',
    ]
# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : Optional[Any] = [
        'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFConvBertForMaskedLM',
        'TFConvBertForMultipleChoice',
        'TFConvBertForQuestionAnswering',
        'TFConvBertForSequenceClassification',
        'TFConvBertForTokenClassification',
        'TFConvBertLayer',
        'TFConvBertModel',
        'TFConvBertPreTrainedModel',
    ]
# Under a type checker the real symbols are imported eagerly; at runtime the
# module replaces itself with a lazy proxy that imports submodules on demand.
if TYPE_CHECKING:
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )
else:
    import sys

    # NOTE(review): the obfuscated original bound the lazy module to a throwaway
    # variable; the lazy-import pattern only works when the proxy is installed
    # in sys.modules under this module's name (restored per the upstream idiom).
    # `_import_structure` is the upstream name of the dict built above, which
    # the renaming pass collapsed onto `a__` — confirm against the original.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowercase_(enum.Enum):
    """Output modes for the text-generation pipeline's postprocess step.

    NOTE(review): the renaming pass collapsed all three members onto one
    identifier — `enum` rejects a reused key with TypeError, so the class could
    not even be defined.  Member names are restored from the call sites below
    (ReturnType.TENSORS / NEW_TEXT / FULL_TEXT); the values are unchanged.
    """

    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(a__ )
class lowercase_ ( a__ ):
    # NOTE(review): this is the text-generation pipeline (prompt in, generated
    # continuation out).  Several method signatures below declare duplicate `a`
    # parameters — a SyntaxError — so the original parameter names must be
    # restored before this module can be imported; comments describe the
    # intended flow only.  `ReturnType`, `prefix`, `generate_kwargs`, etc. are
    # the upstream names that the renaming pass destroyed in the bodies.

    # Prompt prefix used for XLNet / Transfo-XL, which need extra context state.
    __UpperCAmelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
    def __init__( self , *a , **a ):
        super().__init__(*a , **a )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            UpperCamelCase__ = None
            if self.model.config.prefix is not None:
                UpperCamelCase__ = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                UpperCamelCase__ = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._sanitize_parameters(prefix=a , **self._forward_params )
                UpperCamelCase__ = {**self._preprocess_params, **preprocess_params}
                UpperCamelCase__ = {**self._forward_params, **forward_params}
    # Splits user kwargs into (preprocess, forward/generate, postprocess) dicts.
    def __a ( self , a=None , a=None , a=None , a=None , a=None , a=None , a=None , a=None , **a , ):
        UpperCamelCase__ = {}
        if prefix is not None:
            UpperCamelCase__ = prefix
        if prefix:
            # Tokenize the prefix once so generation lengths can account for it.
            UpperCamelCase__ = self.tokenizer(
                a , padding=a , add_special_tokens=a , return_tensors=self.framework )
            UpperCamelCase__ = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    " [None, 'hole']" )
            UpperCamelCase__ = handle_long_generation
        preprocess_params.update(a )
        UpperCamelCase__ = generate_kwargs
        UpperCamelCase__ = {}
        # `return_full_text` / `return_text` / `return_tensors` are mutually
        # exclusive ways of selecting the postprocess ReturnType.
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            UpperCamelCase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            UpperCamelCase__ = ReturnType.TENSORS
        if return_type is not None:
            UpperCamelCase__ = return_type
        if clean_up_tokenization_spaces is not None:
            UpperCamelCase__ = clean_up_tokenization_spaces
        if stop_sequence is not None:
            UpperCamelCase__ = self.tokenizer.encode(a , add_special_tokens=a )
            if len(a ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            UpperCamelCase__ = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def __a ( self , *a , **a ):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True} )
        return super()._parse_and_tokenize(*a , **a )
    def __call__( self , a , **a ):
        return super().__call__(a , **a )
    # Preprocess: tokenize prefix + prompt; optionally truncate from the left
    # ("hole") so there is room for the requested number of new tokens.
    def __a ( self , a , a="" , a=None , **a ):
        UpperCamelCase__ = self.tokenizer(
            prefix + prompt_text , padding=a , add_special_tokens=a , return_tensors=self.framework )
        UpperCamelCase__ = prompt_text
        if handle_long_generation == "hole":
            UpperCamelCase__ = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                UpperCamelCase__ = generate_kwargs["max_new_tokens"]
            else:
                UpperCamelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected" )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                UpperCamelCase__ = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length" )
                UpperCamelCase__ = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    UpperCamelCase__ = inputs["attention_mask"][:, -keep_length:]
        return inputs
    # Forward: run `model.generate`, adjusting length kwargs by the prefix
    # length, and reshape the output to (batch, num_return_sequences, seq).
    def __a ( self , a , **a ):
        UpperCamelCase__ = model_inputs["input_ids"]
        UpperCamelCase__ = model_inputs.get("attention_mask" , a )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            UpperCamelCase__ = None
            UpperCamelCase__ = None
            UpperCamelCase__ = 1
        else:
            UpperCamelCase__ = input_ids.shape[0]
        UpperCamelCase__ = model_inputs.pop("prompt_text" )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        UpperCamelCase__ = generate_kwargs.pop("prefix_length" , 0 )
        if prefix_length > 0:
            UpperCamelCase__ = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                UpperCamelCase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            UpperCamelCase__ = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        UpperCamelCase__ = self.model.generate(input_ids=a , attention_mask=a , **a )
        UpperCamelCase__ = generated_sequence.shape[0]
        if self.framework == "pt":
            UpperCamelCase__ = generated_sequence.reshape(a , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            UpperCamelCase__ = tf.reshape(a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    # Postprocess: decode each generated sequence and strip the prompt unless
    # FULL_TEXT was requested; TENSORS returns the raw token ids.
    def __a ( self , a , a=ReturnType.FULL_TEXT , a=True ):
        UpperCamelCase__ = model_outputs["generated_sequence"][0]
        UpperCamelCase__ = model_outputs["input_ids"]
        UpperCamelCase__ = model_outputs["prompt_text"]
        UpperCamelCase__ = generated_sequence.numpy().tolist()
        UpperCamelCase__ = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                UpperCamelCase__ = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                UpperCamelCase__ = self.tokenizer.decode(
                    a , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    UpperCamelCase__ = 0
                else:
                    UpperCamelCase__ = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , ) )
                if return_type == ReturnType.FULL_TEXT:
                    UpperCamelCase__ = prompt_text + text[prompt_length:]
                else:
                    UpperCamelCase__ = text[prompt_length:]
                UpperCamelCase__ = {"generated_text": all_text}
            records.append(a )
        return records
| 80 | 1 |
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowercase_(unittest.TestCase):
    """Checks that an accelerate-prepared optimizer survives a pickle round-trip."""

    def __a(self):
        # NOTE(review): the obfuscated original called `accelerator.prepare(a)`
        # with an undefined name and dropped the local variable names; restored
        # so the prepared optimizer is what actually gets pickled.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f'''Accelerated optimizer pickling failed with {e}''')
        # Reset global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
| 80 |
'''simple docstring'''
from ....utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
class lowercase_(a__):
    """Config wrapper for multimodal models: exposes every attribute of the
    wrapped text `config` and adds the modality encoder's hidden size.

    NOTE(review): the obfuscated original declared three parameters all named
    `a` (a SyntaxError) and dropped the `self.__dict__` assignment target;
    restored per the upstream multimodal-config idiom.
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        # Share the wrapped config's attribute dict so all of its fields are
        # visible on this object (note: this also means writes below mutate it).
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 80 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Slow tokenizer fallback is only importable when sentencepiece is installed.
# NOTE(review): upstream these module constants have distinct names
# (BarthezTokenizer fallback, logger, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES,
# SPIECE_UNDERLINE); the renaming pass collapsed them all onto `a__`, so each
# rebinding clobbers the previous one — confirm against the original module.
if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    a__ : List[Any] = None
a__ : Dict = logging.get_logger(__name__)
a__ : Any = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
a__ : str = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
        ),
    },
}
a__ : Any = {
    'moussaKam/mbarthez': 1_0_2_4,
    'moussaKam/barthez': 1_0_2_4,
    'moussaKam/barthez-orangesum-title': 1_0_2_4,
}
a__ : int = '▁'
class lowercase_ ( a__ ):
    # NOTE(review): this is the fast BARThez tokenizer.  The renaming pass
    # collapsed the five distinct class attributes (vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes, model_input_names,
    # slow_tokenizer_class) onto `__UpperCAmelCase`, and declared `__init__`
    # with duplicate `a` parameters (a SyntaxError) — the original names must
    # be restored before use; comments below describe the intended behavior.
    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase = ['input_ids', 'attention_mask']
    __UpperCAmelCase = BarthezTokenizer
    def __init__( self , a=None , a=None , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , **a , ):
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
        super().__init__(
            a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
        UpperCamelCase__ = vocab_file
        UpperCamelCase__ = False if not self.vocab_file else True
    # Builds `<s> A </s>` or `<s> A </s></s> B </s>` from one or two sequences.
    def __a ( self , a , a = None ):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase__ = [self.cls_token_id]
        UpperCamelCase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    # BARThez uses all-zero token type ids, like RoBERTa-style models.
    def __a ( self , a , a = None ):
        UpperCamelCase__ = [self.sep_token_id]
        UpperCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # Copies the sentencepiece model file next to the saved tokenizer files.
    def __a ( self , a , a = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(a ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        UpperCamelCase__ = os.path.join(
            a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
            copyfile(self.vocab_file , a )
        return (out_vocab_file,)
| 80 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
# Random browser User-Agent so Instagram serves the full HTML profile page.
a__ : Tuple = {'UserAgent': UserAgent().random}
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
UpperCamelCase__ = script.contents[0]
UpperCamelCase__ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase_:
    """Read-only view over an Instagram user's public profile JSON.

    NOTE(review): the obfuscated original dropped the `self.*` assignment
    targets in `__init__` and collapsed every property name to `__a`; the
    names are restored from the call sites (`self.get_json`, `self.username`,
    and the assertions in the smoke test below).  `a__` is the module-level
    request-headers dict.
    """

    def __init__(self, username):
        self.url = f'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()

    def get_json(self):
        """Fetch the profile page and parse the shared-data JSON out of its scripts."""
        html = requests.get(self.url, headers=a__).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        # The shared-data script index varies; fall back when parsing fails.
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self):
        return f'''{self.__class__.__name__}(\'{self.username}\')'''

    def __str__(self):
        return f'''{self.fullname} ({self.username}) is {self.biography}'''

    @property
    def username(self):
        return self.user_data["username"]

    @property
    def fullname(self):
        return self.user_data["full_name"]

    @property
    def biography(self):
        return self.user_data["biography"]

    @property
    def email(self):
        return self.user_data["business_email"]

    @property
    def website(self):
        return self.user_data["external_url"]

    @property
    def number_of_followers(self):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self):
        return self.user_data["is_verified"]

    @property
    def is_private(self):
        return self.user_data["is_private"]
def _UpperCamelCase(username="github") -> None:
    """Network smoke test for the Instagram scraper against the "github" account.

    NOTE(review): the obfuscated original's parameter was `__A` while the body
    referenced `username`, and `isinstance(..., __A)` replaced the upstream
    `dict` check; both restored.  `InstagramUser` is the upstream class name
    (renamed to `lowercase_` above) — confirm before running.
    """
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `InstagramUser` is the upstream class name; the class above
    # was renamed `lowercase_` by the obfuscation pass, so this demo would
    # raise NameError — confirm against the original module.
    a__ : Any = InstagramUser('github')
    print(instagram_user)
    print(F"""{instagram_user.number_of_posts = }""")
    print(F"""{instagram_user.number_of_followers = }""")
    print(F"""{instagram_user.number_of_followings = }""")
    print(F"""{instagram_user.email = }""")
    print(F"""{instagram_user.website = }""")
    print(F"""{instagram_user.profile_picture_url = }""")
    print(F"""{instagram_user.is_verified = }""")
    print(F"""{instagram_user.is_private = }""")
| 80 | 1 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a__ : Dict = logging.get_logger(__name__)
def _UpperCamelCase(artifact_path, targets):
    """Collect warnings whose category is in `targets` from one CI artifact.

    `artifact_path` is a directory when the module-level `from_gh` flag is set
    (artifacts pre-extracted by a GitHub workflow), otherwise a .zip file.
    Only `warnings.txt` entries are scanned.

    NOTE(review): the obfuscated original declared both parameters as `__A`
    (a SyntaxError); names restored from the body.  `from_gh` is a module
    global set in the `__main__` section — this function assumes script usage.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Warnings are indented continuation lines; a non-indented line closes
        # the warning currently accumulated in `buffer`.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f''': {x}: ''' in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''')
    return selected_warnings
def _UpperCamelCase(artifact_dir, targets):
    """Aggregate target warnings across all artifacts found in `artifact_dir`.

    NOTE(review): the obfuscated original declared both parameters as `__A`
    (a SyntaxError); names restored.  `from_gh` is a module global set in
    `__main__`, and `extract_warnings_from_single_artifact` is the upstream
    name of the sibling function above (renamed `_UpperCamelCase` by the
    obfuscation pass) — confirm both before running.
    """
    selected_warnings = set()
    # With `from_gh` every directory entry is an already-extracted artifact;
    # otherwise only .zip downloads are considered.
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    # NOTE(review): this helper was `list_str(values)` upstream — the parameter
    # was renamed `__A` while the body references `values` (a NameError), and
    # the `type=list_str` reference below still uses the original name.
    def _UpperCamelCase ( __A ) -> int:
        '''simple docstring'''
        return values.split("," )
    a__ : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Where to store the downloaded artifacts and other result files.',
    )
    parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    # optional parameters
    parser.add_argument(
        '--targets',
        default='DeprecationWarning,UserWarning,FutureWarning',
        type=list_str,
        help='Comma-separated list of target warning(s) which we want to extract.',
    )
    parser.add_argument(
        '--from_gh',
        action='store_true',
        help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
    )
    a__ : List[Any] = parser.parse_args()
    a__ : int = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)
        # get download links
        a__ : Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)
        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print('=' * 8_0)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)
    # extract warnings from artifacts
    a__ : Any = extract_warnings(args.output_dir, args.targets)
    a__ : str = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 80 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A , __A , __A ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A , __A ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCamelCase ( __A , __A , __A ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = 0
while b > 0:
if b & 1:
UpperCamelCase__ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 80 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> list of public names it exports.
# Fixed: every optional list below was bound to the same throwaway global
# (`a__`) and `_import_structure` was referenced at the bottom without ever
# being defined; the final `_LazyModule` was also never installed into
# `sys.modules`, so the lazy-module pattern did nothing.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime these are lazy.
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy frameworks load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 | 1 |
'''simple docstring'''
a__ : Union[str, Any] = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
a__ : Optional[int] = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 1_2,
'Pm': 1_5,
'Em': 1_8,
'Zm': 2_1,
'Ym': 2_4,
}
def _UpperCamelCase ( __A , __A , __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = from_type.lower().strip("s" )
UpperCamelCase__ = to_type.lower().strip("s" )
UpperCamelCase__ = UNIT_SYMBOL.get(__A , __A )
UpperCamelCase__ = UNIT_SYMBOL.get(__A , __A )
if from_sanitized not in METRIC_CONVERSION:
UpperCamelCase__ = (
F'''Invalid \'from_type\' value: {from_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(__A )}'''
)
raise ValueError(__A )
if to_sanitized not in METRIC_CONVERSION:
UpperCamelCase__ = (
F'''Invalid \'to_type\' value: {to_type!r}.\n'''
F'''Conversion abbreviations are: {', '.join(__A )}'''
)
raise ValueError(__A )
UpperCamelCase__ = METRIC_CONVERSION[from_sanitized]
UpperCamelCase__ = METRIC_CONVERSION[to_sanitized]
UpperCamelCase__ = 1
if from_exponent > to_exponent:
UpperCamelCase__ = from_exponent - to_exponent
else:
UpperCamelCase__ = -(to_exponent - from_exponent)
return value * pow(10 , __A )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a, b) -> list:
    """Multiply two 2x2 matrices directly (base case for Strassen).

    Fixed: both parameters were named ``__A`` (a SyntaxError) while the body
    read ``a``/``b``; renamed to ``default_matrix_multiplication``, the name
    the recursive Strassen routine in this file actually calls.

    Raises:
        Exception: if either matrix is not 2x2.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a, matrix_b) -> list:
    """Element-wise sum of two equally-shaped matrices.

    Fixed: duplicate ``__A`` parameters (SyntaxError) while the body read
    ``matrix_a``/``matrix_b``; renamed to ``matrix_addition``, the name the
    Strassen routines in this file call; junk ``-> str`` annotation corrected.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a, matrix_b) -> list:
    """Element-wise difference of two equally-shaped matrices.

    Fixed: duplicate ``__A`` parameters (SyntaxError) while the body read
    ``matrix_a``/``matrix_b``; renamed to ``matrix_subtraction``, the name the
    Strassen routines in this file call; junk annotation corrected.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into its four quadrants.

    Returns (top_left, top_right, bot_left, bot_right).  Fixed: the parameter
    was named ``__A`` while the body read ``a``; renamed to ``split_matrix``,
    the name the Strassen routines in this file call.

    Raises:
        Exception: if either dimension is odd.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix) -> tuple[int, int]:
    """Return (rows, columns) of a matrix.

    Fixed: the body read ``matrix[0]`` while the parameter was named ``__A``;
    renamed to ``matrix_dimensions``, the name the Strassen routines call.
    """
    return len(matrix), len(matrix[0])
def _UpperCamelCase(matrix) -> None:
    """Print a matrix, one row per line.

    Fixed: the body built ``str(__A)`` per row (printing the whole matrix on
    every line) and iterated an undefined name ``matrix``; now each row is
    stringified individually.
    """
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a, matrix_b) -> list:
    """Recursively multiply two square power-of-two matrices with Strassen's
    seven-product scheme.

    Fixed: duplicate ``__A`` parameters (SyntaxError) and every intermediate
    bound to the same throwaway local; the quadrants and seven products are
    reconstructed following the standard Strassen formulas, matching the call
    structure visible in the original lines.  Renamed to ``actual_strassen``,
    the name used by its own recursive call sites.
    """
    # Base case: fall back to the direct 2x2 product.
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # The seven Strassen products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrixa, matrixb) -> list:
    """Multiply two matrices via Strassen's algorithm, padding both with zeros
    up to a square power-of-two size first and stripping the padding after.

    Fixed: duplicate ``__A`` parameters (SyntaxError), both dimension tuples
    and both padded matrices collapsed onto single locals, ``math.loga``
    (nonexistent) instead of ``math.log2``, and the second matrix missing from
    the error message.  Renamed to ``strassen``, the name the demo at the
    bottom of this file calls.

    Raises:
        Exception: if the inner dimensions do not match.
    """
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)

    # NOTE(review): early-return condition reconstructed from the collapsed
    # source; returning the operand pair unmodified looks suspect — confirm
    # against the original script.
    if dimensiona[0] == dimensionb[1] and dimensionb[0] == dimensiona[1]:
        return [matrixa, matrixb]

    maximum = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrixa = matrixa
    new_matrixb = matrixb

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)

    final_matrix = actual_strassen(new_matrixa, new_matrixb)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: a 10x4 matrix multiplied by a 4x4 matrix.
    # NOTE(review): both matrix literals are bound to the same obfuscated
    # global `a__`, and the call below passes `matrixa` twice while neither
    # `matrixa` nor `strassen` is defined under those names in this chunk —
    # presumably the original read `print(strassen(matrixa, matrixb))` with two
    # distinct module globals; confirm against the original script.
    a__ : int = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    a__ : str = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
    print(strassen(matrixa, matrixa))
| 80 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (obfuscated name: originally presumably `logger`).
a__ : Any = logging.get_logger(__name__)
# Pretrained checkpoint name -> config-file URL (originally presumably
# `VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP`).
a__ : int = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class lowercase_(PretrainedConfig):
    """Configuration for a ViT-MSN model.

    Fixed: the base class was the undefined ``a__`` (``PretrainedConfig`` is
    imported at the top of this module), every ``__init__`` parameter was
    named ``a`` (a SyntaxError), and every value was assigned to the same
    throwaway local instead of ``self`` — so the config stored nothing.
    Parameter names are reconstructed from the right-hand sides of those
    assignments.
    """

    # Model type identifier (originally presumably `model_type`).
    __UpperCAmelCase = 'vit_msn'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 80 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : List[str] = logging.get_logger(__name__)  # module logger (obfuscated name)
# PIL is only needed when vision extras are installed.
if is_vision_available():
    import PIL
class lowercase_(BaseImageProcessor):
    """CLIP-style image processor: convert-to-RGB -> resize (shortest edge) ->
    center-crop -> rescale -> normalize -> channel-format.

    Fixed: the base class was the undefined ``a__`` (``BaseImageProcessor`` is
    imported above), every method was named ``__a`` (later defs shadowed
    earlier ones while the body calls ``self.resize``/``self.center_crop``/
    ``self.rescale``/``self.normalize``), every parameter was named ``a``
    (duplicate-parameter SyntaxError), and the ``__init__`` assignments were
    collapsed onto one throwaway local instead of ``self``.  Names are
    reconstructed from the internal call sites and default values.
    """

    # Model input names (originally presumably `model_input_names`).
    __UpperCAmelCase = ['pixel_values']

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        """Resize so the shortest edge of the image matches size["shortest_edge"]."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # Module-level `resize` from image_transforms — not recursion.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop the image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize the image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one image or a batch; per-call arguments
        override the defaults captured in ``__init__``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 80 | 1 |
'''simple docstring'''
def catalan_numbers(upper_limit) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    Fixed: the body read ``upper_limit`` while the parameter was named
    ``__A``; the inner loop iterated ``range(__A)`` instead of ``range(i)``
    (the recurrence comment says "from j = 0 to i"); renamed to
    ``catalan_numbers``, the name the interactive driver below calls.

    Raises:
        ValueError: if upper_limit is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
    # Interactive driver: repeatedly read an upper limit and print the
    # Catalan sequence until the user enters a negative number or bad input.
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            # NOTE(review): the parsed input is bound to the obfuscated name
            # `a__` but read back as `N` below — presumably the original was
            # `N = int(input().strip())`; confirm against the original script.
            a__ : Tuple = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(F"""The Catalan numbers from 0 through {N} are:""")
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    # Bad input (non-integer, or the NameError noted above) ends the session.
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')
    import doctest
    doctest.testmod()
| 80 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( a__ , unittest.TestCase ):
    """Tokenizer test-suite for CLIP (slow and fast tokenizers).

    NOTE(review): every test method in this chunk was renamed to ``__a`` (so
    later defs shadow earlier ones), all locals are collapsed onto
    ``UpperCamelCase__``, and the base ``a__`` is undefined here — presumably
    ``TokenizerTesterMixin`` (imported above) with the original
    ``test_*``/``setUp`` method names; confirm against the original module.
    """

    # Tokenizer classes under test and shared fixture flags (originally
    # presumably tokenizer_class / rust_tokenizer_class / test_rust_tokenizer /
    # from_pretrained_kwargs / test_seq2seq, per TokenizerTesterMixin).
    __UpperCAmelCase = CLIPTokenizer
    __UpperCAmelCase = CLIPTokenizerFast
    __UpperCAmelCase = True
    __UpperCAmelCase = {}
    __UpperCAmelCase = False
    # setUp: write a tiny BPE vocab + merges fixture to a temp dir.
    def __a ( self ):
        super().setUp()
        # fmt: off
        UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        UpperCamelCase__ = dict(zip(a , range(len(a ) ) ) )
        UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        UpperCamelCase__ = {"unk_token": "<unk>"}
        UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(a ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(a ) )
    # Factory: slow tokenizer built from the fixture dir.
    def __a ( self , **a ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
    # Factory: fast tokenizer built from the fixture dir.
    def __a ( self , **a ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
    # Provide (input_text, output_text) pair for round-trip checks.
    def __a ( self , a ):
        UpperCamelCase__ = "lower newer"
        UpperCamelCase__ = "lower newer"
        return input_text, output_text
    # Check tokenize() and token->id conversion against the fixture vocab.
    def __a ( self ):
        UpperCamelCase__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        UpperCamelCase__ = "lower newer"
        UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        UpperCamelCase__ = tokenizer.tokenize(a )
        self.assertListEqual(a , a )
        UpperCamelCase__ = tokens + [tokenizer.unk_token]
        UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
    # Slow (ftfy) and fast tokenizers must tokenize identically, including
    # odd unicode normalization, space and line-break characters.
    @require_ftfy
    def __a ( self ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                UpperCamelCase__ = self.tokenizer_class.from_pretrained(a , **a )
                UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a , **a )
                UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                UpperCamelCase__ = tokenizer_s.tokenize(a )
                UpperCamelCase__ = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
                UpperCamelCase__ = tokenizer_s.tokenize(a )
                UpperCamelCase__ = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of space type
                UpperCamelCase__ = [
                    "\u0009", # (horizontal tab, '\t')
                    "\u000B", # (vertical tab)
                    "\u000C", # (form feed)
                    "\u0020", # (space, ' ')
                    "\u200E", # (left-to-right mark):w
                    "\u200F", # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    UpperCamelCase__ = tokenizer_s.tokenize(a )
                    UpperCamelCase__ = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of line break type
                UpperCamelCase__ = [
                    "\u000A", # (line feed, '\n')
                    "\r\n", # (carriage return and line feed, '\r\n')
                    "\u000D", # (carriage return, '\r')
                    "\r", # (carriage return, '\r')
                    "\u000D", # (carriage return, '\r')
                    "\u2028", # (line separator)
                    "\u2029", # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    UpperCamelCase__ = tokenizer_s.tokenize(a )
                    UpperCamelCase__ = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
    def __a ( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
                UpperCamelCase__ = f'''{text_of_1_token} {text_of_1_token}'''
                UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                # Same text with a leading space: offsets shift by one.
                UpperCamelCase__ = f''' {text}'''
                UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
    def __a ( self ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(a ) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format." ) )
    @require_ftfy
    def __a ( self ):
        super().test_tokenization_python_rust_equals()
    def __a ( self ):
        # CLIP always lower cases letters
        pass
| 80 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger (obfuscated name: originally presumably `logger`).
a__ : str = logging.get_logger(__name__)
# Pretrained checkpoint name -> config-file URL.
a__ : Optional[int] = {
    'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class lowercase_(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXt-V2 model/backbone.

    Fixed: the class listed the same undefined base ``a__`` twice (a
    duplicate-base TypeError) — ``BackboneConfigMixin`` and
    ``PretrainedConfig`` are imported at the top of this module; every
    ``__init__`` parameter was named ``a`` (a SyntaxError); and every value
    was assigned to a throwaway local instead of ``self``, so the config
    stored nothing.  Parameter names are reconstructed from the right-hand
    sides of those assignments.
    """

    # Model type identifier (originally presumably `model_type`).
    __UpperCAmelCase = 'convnextv2'

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # Backbone stage naming: "stem" plus one entry per depth.
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
| 80 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
a__ : Optional[int] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
a__ : int = None
def parse_args():
    """Build and parse the CLI arguments for the SQuAD 2.0 evaluation script.

    Prints help and exits when invoked with no arguments.  Fixed: the def was
    renamed away from `parse_args` (the name `__main__` calls), the result
    was never bound, `type`/`default` of two options were replaced with an
    undefined name, and the junk `-> Dict` annotation referenced an
    unimported name.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
    parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
    parser.add_argument(
        "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh" , "-t" , type=float , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
    parser.add_argument(
        "--out-image-dir" , "-p" , metavar="out_images" , default=None , help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose" , "-v" , action="store_true" )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()
def make_qid_to_has_ans(dataset) -> dict:
    """Map each question id in the dataset to whether it has a gold answer.

    Fixed: the body iterated `dataset` while the parameter was named `__A`,
    and the per-qid result was bound to a throwaway local instead of being
    stored in the dict; renamed to `make_qid_to_has_ans`, the name main()
    calls.
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"] )
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace.

    Fixed: the inner helpers read `text` while their parameters were named
    `__A`; renamed to `normalize_answer`, the name the scoring functions call.
    """

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" " , text )

    def white_space_fix(text):
        return " ".join(text.split() )

    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def get_tokens(s):
    """Tokenize a normalized answer string; empty input yields an empty list.

    Fixed: the guard read `s` while the parameter was named `__A`; renamed to
    `get_tokens`, the name compute_fa calls.
    """
    if not s:
        return []
    return normalize_answer(s ).split()
def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized gold and predicted answers match, else 0.

    Fixed: duplicate `__A` parameters (SyntaxError); renamed to
    `compute_exact`, the name get_raw_scores calls.
    """
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_fa(a_gold, a_pred):
    """Token-level F1 between a gold and a predicted answer string.

    Fixed: duplicate `__A` parameters (SyntaxError) and every intermediate
    bound to the same throwaway local while later lines read `gold_toks`,
    `pred_toks`, `common`, `num_same`, `precision` and `recall`; renamed to
    `compute_fa`, the name get_raw_scores calls.
    """
    gold_toks = get_tokens(a_gold )
    pred_toks = get_tokens(a_pred )
    # Multiset intersection of the two token bags.
    common = collections.Counter(gold_toks ) & collections.Counter(pred_toks )
    num_same = sum(common.values() )
    if len(gold_toks ) == 0 or len(pred_toks ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks )
    recall = 1.0 * num_same / len(gold_toks )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute raw exact-match and F1 scores for every question id.

    Returns (exact_scores, fa_scores), each mapping qid -> score.  Fixed:
    duplicate `__A` parameters (SyntaxError) and collapsed locals; renamed to
    `get_raw_scores`, the name main() calls.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a , a_pred ) for a in gold_answers )
                fa_scores[qid] = max(compute_fa(a , a_pred ) for a in gold_answers )
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Re-score each qid: when its no-answer probability exceeds the threshold,
    the score becomes 1.0 if the question truly has no answer, else 0.0.

    Fixed: four parameters all named `__A` (SyntaxError) while the body read
    `na_probs`/`qid_to_has_ans`/`s`; renamed to `apply_no_ans_threshold`, the
    name main() calls.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid] )
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-qid scores into {"exact", "f1", "total"} percentages,
    optionally restricted to a subset of question ids.

    Fixed: duplicate `__A` parameters (SyntaxError) and the `total` count
    bound to a throwaway local; renamed to `make_eval_dict`, the name main()
    calls.
    """
    if not qid_list:
        total = len(exact_scores )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values() ) / total),
                ("f1", 100.0 * sum(fa_scores.values() ) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list )
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ("total", total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of `new_eval` into `main_eval` under "{prefix}_{key}".

    Fixed: three parameters all named `__A` (SyntaxError) and the loop body
    bound each value to a throwaway local instead of writing into the target
    dict; key pattern reconstructed from the prefixed keys (e.g.
    "HasAns_exact") the rest of this script produces.  Renamed to
    `merge_eval`, the name its call sites use.
    """
    for k in new_eval:
        main_eval[f'''{prefix}_{k}'''] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Render a precision-recall step curve and save it to `out_image`.

    Uses the module-global `plt` (matplotlib, imported under __main__).
    Fixed: four parameters all named `__A` (SyntaxError); argument names
    reconstructed from the axis labels and call sites; renamed to
    `plot_pr_curve`, the name make_precision_recall_eval calls.
    """
    plt.step(recalls , precisions , color="b" , alpha=0.2 , where="post" )
    plt.fill_between(recalls , precisions , step="post" , alpha=0.2 , color="b" )
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title )
    plt.savefig(out_image )
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Sweep the no-answer-probability threshold and compute average precision.

    Returns {"ap": percentage}.  Optionally plots the PR curve when
    `out_image` is given.  Fixed: six parameters all named `__A`
    (SyntaxError) and every intermediate bound to the same throwaway local
    while later lines read `qid_list`, `true_pos`, `cur_p`, `cur_r`,
    `precisions`, `recalls` and `avg_prec`; renamed to
    `make_precision_recall_eval`, the name its call sites use.
    """
    # Examine qids in order of increasing no-answer probability.
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1 )
        cur_r = true_pos / float(num_true_pos )
        if i == len(qid_list ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p )
            recalls.append(cur_r )
    if out_image:
        plot_pr_curve(precisions , recalls , out_image , title )
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves (exact, F1, oracle) and merge their APs into
    `main_eval` under the pr_exact/pr_f1/pr_oracle prefixes.

    Fixed: six parameters all named `__A` (SyntaxError) and the three
    intermediate PR dicts collapsed onto one throwaway local; renamed to
    `run_precision_recall_analysis`, the name main() calls.
    """
    if out_image_dir and not os.path.exists(out_image_dir ):
        os.makedirs(out_image_dir )
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
    pr_fa = make_precision_recall_eval(
        fa_raw , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
    # Oracle: score 1 exactly for the answerable questions.
    oracle_scores = {k: float(v ) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores , na_probs , num_true_pos , qid_to_has_ans , out_image=os.path.join(out_image_dir , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
    merge_eval(main_eval , pr_exact , "pr_exact" )
    merge_eval(main_eval , pr_fa , "pr_f1" )
    merge_eval(main_eval , pr_oracle , "pr_oracle" )
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given qids.

    Uses the module-global `plt` (matplotlib, imported under __main__).
    Fixed: four parameters all named `__A` (SyntaxError) and both
    intermediates collapsed onto one throwaway local; renamed to
    `histogram_na_prob`, the name main() calls.
    """
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Normalize bar heights so they sum to 1 (proportion of dataset).
    weights = np.ones_like(x ) / float(len(x ) )
    plt.hist(x , weights=weights , bins=20 , range=(0.0, 1.0) )
    plt.xlabel("Model probability of no-answer" )
    plt.ylabel("Proportion of dataset" )
    plt.title(f'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(image_dir , f'''na_prob_hist_{name}.png''' ) )
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold maximizing the aggregate score.

    Returns (best score as a percentage of len(scores), best threshold).
    Fixed: four parameters all named `__A` (SyntaxError) and the running
    accumulators collapsed onto one throwaway local while later lines read
    `cur_score`, `best_score`, `best_thresh` and `diff`; renamed to
    `find_best_thresh`, the name find_all_best_thresh calls.
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    # Baseline: predicting no-answer for everything scores one per NoAns qid.
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs , key=lambda k: na_probs[k] )
    for i, qid in enumerate(qid_list ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores ), best_thresh
def _UpperCamelCase ( __A , __A , __A , __A , __A , __A ) -> Dict:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = find_best_thresh(__A , __A , __A , __A )
UpperCamelCase__ , UpperCamelCase__ = find_best_thresh(__A , __A , __A , __A )
UpperCamelCase__ = best_exact
UpperCamelCase__ = exact_thresh
UpperCamelCase__ = best_fa
UpperCamelCase__ = fa_thresh
def _UpperCamelCase ( ) -> Any:
'''simple docstring'''
with open(OPTS.data_file ) as f:
UpperCamelCase__ = json.load(__A )
UpperCamelCase__ = dataset_json["data"]
with open(OPTS.pred_file ) as f:
UpperCamelCase__ = json.load(__A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
UpperCamelCase__ = json.load(__A )
else:
UpperCamelCase__ = {k: 0.0 for k in preds}
UpperCamelCase__ = make_qid_to_has_ans(__A ) # maps qid to True/False
UpperCamelCase__ = [k for k, v in qid_to_has_ans.items() if v]
UpperCamelCase__ = [k for k, v in qid_to_has_ans.items() if not v]
UpperCamelCase__ , UpperCamelCase__ = get_raw_scores(__A , __A )
UpperCamelCase__ = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
UpperCamelCase__ = apply_no_ans_threshold(__A , __A , __A , OPTS.na_prob_thresh )
UpperCamelCase__ = make_eval_dict(__A , __A )
if has_ans_qids:
UpperCamelCase__ = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , "HasAns" )
if no_ans_qids:
UpperCamelCase__ = make_eval_dict(__A , __A , qid_list=__A )
merge_eval(__A , __A , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(__A , __A , __A , __A , __A , __A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__A , __A , __A , __A , __A , OPTS.out_image_dir )
histogram_na_prob(__A , __A , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(__A , __A , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(__A , __A )
else:
print(json.dumps(__A , indent=2 ) )
if __name__ == "__main__":
a__ : Optional[int] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 80 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a__ : List[str] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowercase_ ( unittest.TestCase ):
def __init__( self , a , a=7 , a=3 , a=18 , a=30 , a=4_00 , a=None , a=True , a=True , a=None , ):
UpperCamelCase__ = size if size is not None else {"height": 20, "width": 20}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = do_convert_rgb
UpperCamelCase__ = [5_12, 10_24, 20_48, 40_96]
UpperCamelCase__ = patch_size if patch_size is not None else {"height": 16, "width": 16}
def __a ( self ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __a ( self ):
UpperCamelCase__ = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCamelCase__ = Image.open(requests.get(a , stream=a ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowercase_ ( a__ , unittest.TestCase ):
__UpperCAmelCase = PixaStructImageProcessor if is_vision_available() else None
def __a ( self ):
UpperCamelCase__ = PixaStructImageProcessingTester(self )
@property
def __a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ):
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_convert_rgb" ) )
def __a ( self ):
UpperCamelCase__ = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase__ = 20_48
UpperCamelCase__ = image_processor(a , return_tensors="pt" , max_patches=a )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase__ = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a ):
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
UpperCamelCase__ = "Hello"
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a , header_text=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a , header_text=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowercase_ ( a__ , unittest.TestCase ):
__UpperCAmelCase = PixaStructImageProcessor if is_vision_available() else None
def __a ( self ):
UpperCamelCase__ = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCamelCase__ = 3
@property
def __a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self ):
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "do_convert_rgb" ) )
def __a ( self ):
# Initialize image_processor
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
UpperCamelCase__ = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase__ = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase__ = image_processor(
a , return_tensors="pt" , max_patches=a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 80 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a__ : Optional[List[str]] = None
a__ : Dict = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a__ : Any = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowercase_ :
__UpperCAmelCase = True
__UpperCAmelCase = None
# Automatically constructed
__UpperCAmelCase = "PIL.Image.Image"
__UpperCAmelCase = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
__UpperCAmelCase = field(default='Image' , init=a__ , repr=a__ )
def __call__( self ):
return self.pa_type
def __a ( self , a ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a , a ):
UpperCamelCase__ = np.array(a )
if isinstance(a , a ):
return {"path": value, "bytes": None}
elif isinstance(a , a ):
return {"path": None, "bytes": value}
elif isinstance(a , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a )
elif isinstance(a , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def __a ( self , a , a=None ):
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(a ):
UpperCamelCase__ = PIL.Image.open(a )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a , config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a )
except ValueError:
UpperCamelCase__ = None
with xopen(a , "rb" , use_auth_token=a ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __a ( self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def __a ( self , a ):
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(a , self.pa_type )
def __a ( self , a ):
@no_op_if_value_is_null
def path_to_bytes(a ):
with xopen(a , "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCamelCase__ = pa.array(
[os.path.basename(a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
return array_cast(a , self.pa_type )
def _UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _UpperCamelCase ( __A ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(__A , format=__A )
return buffer.getvalue()
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
if hasattr(__A , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(__A )
UpperCamelCase__ = np.dtype(__A )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(__A ) )
return {"path": None, "bytes": image_to_bytes(__A )}
def _UpperCamelCase ( __A ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(__A )
if isinstance(__A , __A ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(__A , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
elif isinstance(__A , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(__A )
return [obj_to_image_dict_func(__A ) for obj in objs]
else:
return objs
else:
return objs
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
)
def _UpperCamelCase ( ) -> None:
'''simple docstring'''
UpperCamelCase__ = [90, 23, 6, 33, 21, 65, 123, 34423]
UpperCamelCase__ = math.log(len(__A ) , 2 )
print(F'''Optimal value : {minimax(0 , 0 , __A , __A , __A )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
)
def _UpperCamelCase ( ) -> None:
'''simple docstring'''
UpperCamelCase__ = [90, 23, 6, 33, 21, 65, 123, 34423]
UpperCamelCase__ = math.log(len(__A ) , 2 )
print(F'''Optimal value : {minimax(0 , 0 , __A , __A , __A )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 80 | 1 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 80 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _UpperCamelCase ( __A = 100 ) -> int:
'''simple docstring'''
UpperCamelCase__ = 1
UpperCamelCase__ = 2
for i in range(2 , max_n + 1 ):
UpperCamelCase__ = pre_numerator
UpperCamelCase__ = 2 * i // 3 if i % 3 == 0 else 1
UpperCamelCase__ = cur_numerator
UpperCamelCase__ = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 80 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowercase_ :
    """Builds tiny MaskFormer configs and random inputs for the tests below.

    NOTE(review): this file appears machine-obfuscated. This class plays the
    role of ``MaskFormerModelTester`` (it is instantiated under that name in
    the test class further down); every parameter has been renamed to ``a``
    and every assignment target to ``UpperCamelCase__``, so several bodies
    reference names that are no longer bound. Comments describe apparent
    original intent — confirm against upstream transformers before relying
    on them.
    """
    def __init__( self , a , a=2 , a=True , a=False , a=10 , a=3 , a=32 * 4 , a=32 * 6 , a=4 , a=32 , ):
        # Apparent original signature: (parent, batch_size=2, is_training=True,
        # use_auxiliary_loss=False, num_queries=10, num_channels=3,
        # min_size=32*4, max_size=32*6, num_labels=4, mask_feature_size=32).
        # The duplicated ``a`` parameters are an obfuscation artifact (this is
        # a SyntaxError as written); originally each value was stored on self.
        UpperCamelCase__ = parent
        UpperCamelCase__ = batch_size
        UpperCamelCase__ = is_training
        UpperCamelCase__ = use_auxiliary_loss
        UpperCamelCase__ = num_queries
        UpperCamelCase__ = num_channels
        UpperCamelCase__ = min_size
        UpperCamelCase__ = max_size
        UpperCamelCase__ = num_labels
        UpperCamelCase__ = mask_feature_size
    def __a ( self ):
        """Return (config, pixel_values, pixel_mask, mask_labels, class_labels).

        Presumably the original ``prepare_config_and_inputs``: random image
        tensors, an all-ones pixel mask, binary {0, 1} per-label masks, and
        random integer class labels, all sized from the ``__init__`` attributes.
        """
        UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            a )
        UpperCamelCase__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a )
        # Threshold uniform noise at 0.5 to get binary float masks per label.
        UpperCamelCase__ = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a ) > 0.5
        ).float()
        UpperCamelCase__ = (torch.rand((self.batch_size, self.num_labels) , device=a ) > 0.5).long()
        UpperCamelCase__ = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def __a ( self ):
        """Build a deliberately small MaskFormer config (Swin backbone + DETR decoder)."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
                decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
    def __a ( self ):
        """Return (config, inputs_dict) — the ``prepare_config_and_inputs_for_common`` shape."""
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
        UpperCamelCase__ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
    def __a ( self , a , a ):
        """Check hidden-state counts on a model output against the config.

        NOTE(review): ``assertTrue(len(...), ...)`` passes the expected count as
        the *msg* argument of assertTrue — in upstream this was likely
        ``assertEqual``/``assertLen``; as written it only checks truthiness.
        """
        UpperCamelCase__ = output.encoder_hidden_states
        UpperCamelCase__ = output.pixel_decoder_hidden_states
        UpperCamelCase__ = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(a ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(a ) , config.decoder_config.decoder_layers )
    def __a ( self , a , a , a , a=False ):
        """Run a MaskFormerModel forward pass and check output shapes.

        Presumably the original ``create_and_check_maskformer_model``.
        """
        with torch.no_grad():
            UpperCamelCase__ = MaskFormerModel(config=a )
            model.to(a )
            model.eval()
            UpperCamelCase__ = model(pixel_values=a , pixel_mask=a )
            UpperCamelCase__ = model(a , output_hidden_states=a )
        # the correct shape of output.transformer_decoder_hidden_states ensures the
        # correctness of the encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(a , a )
    def __a ( self , a , a , a , a , a ):
        """Run MaskFormerForInstanceSegmentation with and without labels and
        check the logits shapes and that a scalar loss is produced.
        """
        UpperCamelCase__ = MaskFormerForInstanceSegmentation(config=a )
        model.to(a )
        model.eval()
        def comm_check_on_output(a ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            UpperCamelCase__ = model(pixel_values=a , pixel_mask=a )
            UpperCamelCase__ = model(a )
        comm_check_on_output(a )
        # Second pass with labels so the loss branch is exercised too.
        UpperCamelCase__ = model(
            pixel_values=a , pixel_mask=a , mask_labels=a , class_labels=a )
        comm_check_on_output(a )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowercase_ ( a__ , a__ , unittest.TestCase ):
    """Common-API test suite for MaskFormer models.

    NOTE(review): obfuscated — upstream this is ``MaskFormerModelTest`` with
    bases ``ModelTesterMixin, PipelineTesterMixin, unittest.TestCase``; here
    the mixin bases have been collapsed to ``a__``, class attributes all share
    the name ``__UpperCAmelCase``, and many call sites pass the unbound name
    ``a`` where a local used to be. Comments describe apparent intent.
    """
    # Apparent originals, in order: all_model_classes, pipeline_model_mapping,
    # then four boolean feature flags (e.g. is_encoder_decoder / test_pruning /
    # test_head_masking / test_attention_outputs-style switches) — confirm
    # against upstream before renaming.
    __UpperCAmelCase = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    __UpperCAmelCase = (
        {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    def __a ( self ):
        # setUp: build the model tester and the shared ConfigTester.
        UpperCamelCase__ = MaskFormerModelTester(self )
        UpperCamelCase__ = ConfigTester(self , config_class=a , has_text_modality=a )
    def __a ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    def __a ( self ):
        """Forward-pass shape test without hidden states."""
        UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(a , **a , output_hidden_states=a )
    def __a ( self ):
        """Instance-segmentation head test (logits shapes + loss)."""
        UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*a )
    @unittest.skip(reason="MaskFormer does not use inputs_embeds" )
    def __a ( self ):
        pass
    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
    def __a ( self ):
        pass
    @unittest.skip(reason="MaskFormer is not a generative model" )
    def __a ( self ):
        pass
    @unittest.skip(reason="MaskFormer does not use token embeddings" )
    def __a ( self ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
    def __a ( self ):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def __a ( self ):
        pass
    def __a ( self ):
        """Check every model's forward signature starts with ``pixel_values``."""
        UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ = model_class(a )
            UpperCamelCase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase__ = [*signature.parameters.keys()]
            UpperCamelCase__ = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , a )
    @slow
    def __a ( self ):
        """Smoke-test loading the pretrained checkpoint from the Hub."""
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            UpperCamelCase__ = MaskFormerModel.from_pretrained(a )
            self.assertIsNotNone(a )
    def __a ( self ):
        """Loss is computable when targets (mask/class labels) are provided."""
        UpperCamelCase__ = (self.model_tester.min_size,) * 2
        UpperCamelCase__ = {
            "pixel_values": torch.randn((2, 3, *size) , device=a ),
            "mask_labels": torch.randn((2, 10, *size) , device=a ),
            "class_labels": torch.zeros(2 , 10 , device=a ).long(),
        }
        UpperCamelCase__ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(a )
        UpperCamelCase__ = model(**a )
        self.assertTrue(outputs.loss is not None )
    def __a ( self ):
        """Forward-pass shape test, this time requesting hidden states."""
        UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(a , **a , output_hidden_states=a )
    def __a ( self ):
        """Attentions are returned when ``output_attentions`` is requested."""
        UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ = model_class(a ).to(a )
            UpperCamelCase__ = model(**a , output_attentions=a )
            self.assertTrue(outputs.attentions is not None )
    def __a ( self ):
        """Training smoke test: the loss must backpropagate."""
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        UpperCamelCase__ = self.all_model_classes[1]
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        UpperCamelCase__ = model_class(a )
        model.to(a )
        model.train()
        UpperCamelCase__ = model(a , mask_labels=a , class_labels=a ).loss
        loss.backward()
    def __a ( self ):
        """Retained-gradient test: every intermediate output receives a gradient."""
        # only MaskFormerForInstanceSegmentation has the loss
        UpperCamelCase__ = self.all_model_classes[1]
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
        # Presumably enabling output_hidden_states / output_attentions here.
        UpperCamelCase__ = True
        UpperCamelCase__ = True
        UpperCamelCase__ = model_class(a )
        model.to(a )
        model.train()
        UpperCamelCase__ = model(a , mask_labels=a , class_labels=a )
        UpperCamelCase__ = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        UpperCamelCase__ = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True in inputs_embeds; the original implementation doesn't
        UpperCamelCase__ = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        UpperCamelCase__ = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=a )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the slow integration tests below when comparing
# model outputs against hard-coded reference values.
# NOTE(review): the `Tuple` annotation is an obfuscation artifact — the value
# is a float — but it is kept to avoid touching the module interface.
a__ : Tuple = 1E-4


def _UpperCamelCase ( ) -> "Image.Image":
    """Load the COCO "two cats" fixture image used by the slow integration tests.

    Returns:
        The fixture as a PIL image.

    Notes:
        - The relative path assumes the tests run from the repository root.
        - Fixes two defects in the obfuscated original: it assigned the opened
          image to ``UpperCamelCase__`` but returned the unbound name ``image``
          (a guaranteed NameError), and its return annotation claimed
          ``List[str]`` even though a PIL image is returned.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class lowercase_ ( unittest.TestCase ):
    '''Slow integration tests for MaskFormer against published checkpoints.

    Each test runs a real pretrained model on the COCO fixture image and
    compares a small corner of the outputs against hard-coded reference
    values.

    NOTE(review): `a` is an undefined free name throughout (automated renaming
    collapsed `torch_device` / intermediate variables / the TOLERANCE
    constant); the class cannot run as written.
    '''
    @cached_property
    def __a ( self ):
        # Image processor matching the swin-small COCO checkpoint (None when
        # vision deps are missing).
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
            if is_vision_available()
            else None
        )
    def __a ( self ):
        # Backbone-only model: check encoder / pixel decoder / transformer
        # decoder hidden states against reference slices.
        UpperCamelCase__ = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(a )
        UpperCamelCase__ = self.default_image_processor
        UpperCamelCase__ = prepare_img()
        UpperCamelCase__ = image_processor(a , return_tensors="pt" ).to(a )
        UpperCamelCase__ = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(a , (1, 3, 8_00, 10_88) )
        with torch.no_grad():
            UpperCamelCase__ = model(**a )
        UpperCamelCase__ = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(a )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) )
        UpperCamelCase__ = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(a )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a , atol=a ) )
        UpperCamelCase__ = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(a )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a , atol=a ) )
    def __a ( self ):
        # Instance-segmentation head (swin-small): check mask and class query
        # logits shapes plus reference value slices.
        UpperCamelCase__ = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(a )
            .eval()
        )
        UpperCamelCase__ = self.default_image_processor
        UpperCamelCase__ = prepare_img()
        UpperCamelCase__ = image_processor(a , return_tensors="pt" ).to(a )
        UpperCamelCase__ = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(a , (1, 3, 8_00, 10_88) )
        with torch.no_grad():
            UpperCamelCase__ = model(**a )
        # masks_queries_logits
        UpperCamelCase__ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCamelCase__ = [
            [-1.373_7124, -1.772_4937, -1.936_4233],
            [-1.597_7281, -1.986_7939, -2.152_3695],
            [-1.579_5398, -1.926_9832, -2.09_3942],
        ]
        UpperCamelCase__ = torch.tensor(a ).to(a )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a , atol=a ) )
        # class_queries_logits
        UpperCamelCase__ = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        UpperCamelCase__ = torch.tensor(
            [
                [1.65_12e00, -5.25_72e00, -3.35_19e00],
                [3.61_69e-02, -5.90_25e00, -2.93_13e00],
                [1.07_66e-04, -7.76_30e00, -5.12_63e00],
            ] ).to(a )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a , atol=a ) )
    def __a ( self ):
        # Instance-segmentation head (resnet101, COCO-stuff): same checks with
        # that checkpoint's reference values.
        UpperCamelCase__ = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
            .to(a )
            .eval()
        )
        UpperCamelCase__ = self.default_image_processor
        UpperCamelCase__ = prepare_img()
        UpperCamelCase__ = image_processor(a , return_tensors="pt" ).to(a )
        UpperCamelCase__ = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(a , (1, 3, 8_00, 10_88) )
        with torch.no_grad():
            UpperCamelCase__ = model(**a )
        # masks_queries_logits
        UpperCamelCase__ = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
        UpperCamelCase__ = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        UpperCamelCase__ = torch.tensor(a ).to(a )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a , atol=a ) )
        # class_queries_logits
        UpperCamelCase__ = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        UpperCamelCase__ = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(a )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a , atol=a ) )
    def __a ( self ):
        # End-to-end check that a training-style forward pass with batched
        # segmentation maps produces a loss.
        UpperCamelCase__ = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
            .to(a )
            .eval()
        )
        UpperCamelCase__ = self.default_image_processor
        UpperCamelCase__ = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
        UpperCamelCase__ = inputs["pixel_values"].to(a )
        UpperCamelCase__ = [el.to(a ) for el in inputs["mask_labels"]]
        UpperCamelCase__ = [el.to(a ) for el in inputs["class_labels"]]
        with torch.no_grad():
            UpperCamelCase__ = model(**a )
        self.assertTrue(outputs.loss is not None )
| 80 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> List[str]:
    '''Convert an original mLUKE checkpoint into a Hugging Face LukeForMaskedLM
    model plus an MLukeTokenizer, sanity-check the converted model, and save it.

    NOTE(review): the five parameters are all spelled ``__A`` (duplicate
    argument names are a SyntaxError) and many assignment targets were
    collapsed to ``UpperCamelCase__`` by an automated rename. The intended
    names -- recoverable from the argparse call at the bottom of the file --
    are (checkpoint_path, metadata_path, entity_vocab_path,
    pytorch_dump_folder_path, model_size). Restore them before running.
    '''
    with open(__A ) as metadata_file:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__A , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    UpperCamelCase__ = torch.load(__A , map_location="cpu" )["module"]
    # Load the entity vocab file
    UpperCamelCase__ = load_original_entity_vocab(__A )
    # add an entry for [MASK2]
    UpperCamelCase__ = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    UpperCamelCase__ = AddedToken("<ent>" , lstrip=__A , rstrip=__A )
    # NOTE(review): both AddedToken results are bound to the same name, so the
    # list below registers "<ent2>" twice and "<ent>" is lost -- originally two
    # distinct variables.
    UpperCamelCase__ = AddedToken("<ent2>" , lstrip=__A , rstrip=__A )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(__A )
    # Rewrite tokenizer_config.json so the saved tokenizer reloads as MLukeTokenizer.
    with open(os.path.join(__A , "tokenizer_config.json" ) , "r" ) as f:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = "MLukeTokenizer"
    with open(os.path.join(__A , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(__A , __A )
    with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(__A , __A )
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    # Initialize the embeddings of the special tokens
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
    UpperCamelCase__ = state_dict["embeddings.word_embeddings.weight"]
    UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
    UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        UpperCamelCase__ = state_dict[bias_name]
        UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
        UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
        UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            UpperCamelCase__ = F'''encoder.layer.{layer_index}.attention.self.'''
            # NOTE(review): the three identical reads below originally wrote the
            # w2e_/e2w_/e2e_ query entries of state_dict; the assignment
            # targets were destroyed by the automated rename.
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    UpperCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
    UpperCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    UpperCamelCase__ = state_dict["entity_predictions.bias"]
    UpperCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
    UpperCamelCase__ = LukeForMaskedLM(config=__A ).eval()
    # Decoder weights are re-created by weight tying below; drop them first.
    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    UpperCamelCase__ = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            UpperCamelCase__ = state_dict[key]
        else:
            UpperCamelCase__ = state_dict[key]
    UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__A , strict=__A )
    if set(__A ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(__A ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A , task="entity_classification" )
    UpperCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    UpperCamelCase__ = (0, 9)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        UpperCamelCase__ = torch.Size((1, 33, 768) )
        UpperCamelCase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        UpperCamelCase__ = torch.Size((1, 1, 768) )
        UpperCamelCase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    UpperCamelCase__ = "Tokyo is the capital of <mask>."
    UpperCamelCase__ = (24, 30)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    UpperCamelCase__ = encoding["input_ids"][0].tolist()
    UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(__A )
    UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
    UpperCamelCase__ = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(__A ) )
    model.save_pretrained(__A )
def load_original_entity_vocab(entity_vocab_path) -> dict:
    """Read an original mLUKE ``entity_vocab.jsonl`` file into a name -> id map.

    Each line of the file is a JSON object ``{"id": int, "entities":
    [[name, language], ...]}``. Special tokens (``[MASK]``, ``[PAD]``,
    ``[UNK]``) are mapped under their bare name (first match wins per entry);
    every other alias is mapped under ``"{language}:{name}"``.

    Fixes vs. the obfuscated original: the function is restored under the name
    the converter above calls; ``SPECIAL_TOKENS`` was an undefined reference
    (its definition had been renamed away) and is now a local constant; the
    dict-assignment targets lost to renaming are restored; the vocab file is
    closed via a context manager instead of a bare ``open``.
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    with open(entity_vocab_path) as f:
        data = [json.loads(line) for line in f]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                # Special tokens are language-independent; record once and stop.
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    # Command-line entry point for the mLUKE -> HF conversion.
    # NOTE(review): `convert_luke_checkpoint` and the `parser`/`args` names are
    # undefined here -- the conversion function and the assignment targets of
    # the two `a__` bindings below were renamed by an automated pass; restore
    # the original names before running.
    a__ : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    a__ : Any = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
class lowercase_ :
    """Aho--Corasick multi-pattern string matcher.

    The automaton is a flat list ``self.adlist``; node 0 is the trie root and
    each node is a dict with its character ``value``, trie ``next_states``,
    BFS-computed ``fail_state`` link, and the ``output`` keywords ending at
    (or reachable via failure links from) that node.

    Fixes vs. the obfuscated original: every method was defined as ``__a``
    (so the internal calls to ``add_keyword`` / ``set_fail_transitions`` /
    ``find_next_state`` raised AttributeError, and duplicate ``a , a``
    parameters were a SyntaxError); the methods are restored under the names
    the code itself uses, with the search entry point as ``search_in``.
    """

    def __init__( self , keywords ):
        # Build the trie from all keywords, then wire up the failure links.
        self.adlist = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []} )
        for keyword in keywords:
            self.add_keyword(keyword )
        self.set_fail_transitions()

    def find_next_state( self , current_state , char ):
        """Return the trie child of *current_state* labelled *char*, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword( self , keyword ):
        """Insert *keyword* into the trie, creating nodes as needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state , character )
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    } )
                self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
                current_state = len(self.adlist ) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword )

    def set_fail_transitions( self ):
        """BFS over the trie computing failure links and merging outputs."""
        q = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node )
            # Depth-1 nodes always fail back to the root.
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child )
                state = self.adlist[r]["fail_state"]
                # Follow failure links until a state with a matching child
                # exists (or we hit the root).
                while (
                    self.find_next_state(state , self.adlist[child]["value"] ) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state , self.adlist[child]["value"] )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # Merge outputs reachable through the failure link so every
                # suffix match is reported during the search.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in( self , string ):
        """Return ``{keyword: [start indices]}`` for all occurrences in *string*."""
        result = {}  # keyword -> list of 0-based start positions
        current_state = 0
        for i in range(len(string ) ):
            while (
                self.find_next_state(current_state , string[i] ) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state , string[i] )
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                # i is the index of the match's last character.
                result[key].append(i - len(key ) + 1 )
        return result
if __name__ == "__main__":
    # Run any embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 80 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)  # module logger (originally `logger`)
# Map from checkpoint identifier to the URL of its hosted config file.
a__ : str = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}
class lowercase_ ( PretrainedConfig ):
    """Configuration class for the LiLT (Language-independent Layout
    Transformer) model.

    Fixes vs. the obfuscated original: the ``__init__`` signature used the
    duplicate parameter name ``a`` seventeen times (a SyntaxError) and the
    class inherited from the module-level URL-map dict ``a__``; the parameter
    names are restored from the attribute assignments and the base class is
    ``PretrainedConfig`` (imported at the top of this file).
    """

    __UpperCAmelCase = 'lilt'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_ad_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 80 | 1 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
# Shared logger for this head-pruning script (originally named `logger`).
a__ : str = logging.getLogger(__name__)
def _UpperCamelCase ( __A , __A ) -> Dict:
'''simple docstring'''
if os.path.exists(__A ):
if os.path.exists(os.path.join(__A , "config.json" ) ) and os.path.isfile(
os.path.join(__A , "config.json" ) ):
os.remove(os.path.join(__A , "config.json" ) )
if os.path.exists(os.path.join(__A , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(__A , "pytorch_model.bin" ) ):
os.remove(os.path.join(__A , "pytorch_model.bin" ) )
else:
os.makedirs(__A )
model.save_pretrained(__A )
def _UpperCamelCase ( __A , __A=False ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = 2
if unlogit:
UpperCamelCase__ = torch.pow(__A , __A )
UpperCamelCase__ = p * torch.log(__A )
UpperCamelCase__ = 0
return -plogp.sum(dim=-1 )
def _UpperCamelCase ( tensor ) -> None:
    """Pretty-print a 2D tensor (layers x heads) through the module logger.

    Integer (long) tensors are printed as integers, everything else with five
    decimal places; the header row numbers the columns from 1.

    Fix: the parameter was declared ``__A`` while the body referenced
    ``tensor`` (a NameError); the parameter name is restored.
    """
    logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def _UpperCamelCase ( __A , __A , __A , __A=True , __A=True , __A=None , __A=False ) -> str:
    '''Run the evaluation set through the model accumulating, per attention
    head, its entropy and its gradient-based importance score, and return
    (attn_entropy, head_importance, total_loss).

    NOTE(review): the seven parameters are all spelled ``__A`` (duplicate
    argument names are a SyntaxError); from the original bertology script they
    were (args, model, eval_dataloader, compute_entropy, compute_importance,
    head_mask, actually_pruned). Assignment targets were likewise collapsed to
    ``UpperCamelCase__``; restore the original names before running.
    '''
    UpperCamelCase__ , UpperCamelCase__ = model.config.num_hidden_layers, model.config.num_attention_heads
    UpperCamelCase__ = torch.zeros(__A , __A ).to(args.device )
    UpperCamelCase__ = torch.zeros(__A , __A ).to(args.device )
    if head_mask is None:
        UpperCamelCase__ = torch.ones(__A , __A ).to(args.device )
        head_mask.requires_grad_(requires_grad=__A )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        UpperCamelCase__ = None
    UpperCamelCase__ = 0.0
    UpperCamelCase__ = 0.0
    for step, inputs in enumerate(tqdm(__A , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
        UpperCamelCase__ = tuple(t.to(args.device ) for t in inputs )
        ((UpperCamelCase__) , ) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        UpperCamelCase__ = model(__A , labels=__A , head_mask=__A )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = (
            outputs[0],
            outputs[1],
            outputs[-1],
        ) # Loss and logits are the first, attention the last
        loss.backward() # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(__A ):
                UpperCamelCase__ = entropy(attn.detach() , __A )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(__A ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        UpperCamelCase__ = 2
        UpperCamelCase__ = torch.pow(torch.pow(__A , __A ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        UpperCamelCase__ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies" )
        print_ad_tensor(__A )
    if compute_importance:
        logger.info("Head importance scores" )
        print_ad_tensor(__A )
    logger.info("Head ranked by importance scores" )
    UpperCamelCase__ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    UpperCamelCase__ = torch.arange(
        head_importance.numel() , device=args.device )
    UpperCamelCase__ = head_ranks.view_as(__A )
    print_ad_tensor(__A )
    return attn_entropy, head_importance, total_loss
def _UpperCamelCase ( __A , __A , __A ) -> List[Any]:
    '''Iteratively zero out the least-important attention heads until the
    (1/loss) score drops below ``masking_threshold`` times the original score;
    return the final head mask (also saved as head_mask.npy).

    NOTE(review): duplicate ``__A`` parameters are a SyntaxError; originally
    (args, model, eval_dataloader). Assignment targets were collapsed, and the
    helpers referenced below (compute_heads_importance, print_ad_tensor) carry
    renamed definitions elsewhere in this file.
    '''
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = compute_heads_importance(__A , __A , __A , compute_entropy=__A )
    UpperCamelCase__ = 1 / loss # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f" , __A , original_score * args.masking_threshold )
    UpperCamelCase__ = torch.ones_like(__A )
    UpperCamelCase__ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    UpperCamelCase__ = original_score
    while current_score >= original_score * args.masking_threshold:
        UpperCamelCase__ = new_head_mask.clone().detach() # save current head mask
        # heads from least important to most - keep only not-masked heads
        UpperCamelCase__ = float("Inf" )
        UpperCamelCase__ = head_importance.view(-1 ).sort()[1]
        if len(__A ) <= num_to_mask:
            print("BREAK BY num_to_mask" )
            break
        # mask heads
        UpperCamelCase__ = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
        UpperCamelCase__ = new_head_mask.view(-1 )
        UpperCamelCase__ = 0.0
        UpperCamelCase__ = new_head_mask.view_as(__A )
        UpperCamelCase__ = new_head_mask.clone().detach()
        print_ad_tensor(__A )
        # Compute metric and head importance again
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = compute_heads_importance(
            __A , __A , __A , compute_entropy=__A , head_mask=__A )
        UpperCamelCase__ = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)" , __A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info("Final head mask" )
    print_ad_tensor(__A )
    np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
    return head_mask
def _UpperCamelCase ( __A , __A , __A , __A ) -> Optional[Any]:
    '''Physically prune the heads selected by the mask, re-score the model, and
    log the parameter-count and wall-clock changes before saving the result.

    NOTE(review): duplicate ``__A`` parameters are a SyntaxError; originally
    (args, model, eval_dataloader, head_mask). Assignment targets were
    collapsed, and the helpers called (compute_heads_importance, save_model)
    exist under renamed definitions elsewhere in this file.
    '''
    UpperCamelCase__ = datetime.now()
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = compute_heads_importance(
        __A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A )
    UpperCamelCase__ = 1 / loss
    UpperCamelCase__ = datetime.now() - before_time
    UpperCamelCase__ = sum(p.numel() for p in model.parameters() )
    UpperCamelCase__ = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__A ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(__A , __A ):
            UpperCamelCase__ = [
                v,
            ]
    assert sum(len(__A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(__A )
    UpperCamelCase__ = sum(p.numel() for p in model.parameters() )
    UpperCamelCase__ = datetime.now()
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = compute_heads_importance(
        __A , __A , __A , compute_entropy=__A , compute_importance=__A , head_mask=__A , actually_pruned=__A , )
    UpperCamelCase__ = 1 / loss
    UpperCamelCase__ = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , __A , __A , pruned_num_params / original_num_params * 100 , )
    logger.info("Pruning: score with masking: %f score with pruning: %f" , __A , __A )
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
    save_model(__A , args.output_dir )
def _UpperCamelCase ( ) -> str:
    '''CLI entry point: parse arguments, set up (possibly distributed) devices
    and logging, load GPT-2, build the evaluation DataLoader, compute per-head
    entropy/importance, and optionally run head masking and pruning.

    NOTE(review): the helpers invoked below (compute_heads_importance,
    mask_heads, prune_heads) and the assignment targets (parser, args, device,
    model, ...) were all renamed by an automated pass, so this function cannot
    run as written; restore the original names first.
    '''
    UpperCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=__A , type=__A , required=__A , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
    parser.add_argument(
        "--model_name_or_path" , default=__A , type=__A , required=__A , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--output_dir" , default=__A , type=__A , required=__A , help="The output directory where the model predictions and checkpoints will be written." , )
    # Other parameters
    parser.add_argument(
        "--config_name" , default="" , type=__A , help="Pretrained config name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--tokenizer_name" , default="" , type=__A , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--cache_dir" , default=__A , type=__A , help="Where do you want to store the pre-trained models downloaded from s3" , )
    parser.add_argument(
        "--data_subset" , type=__A , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
    parser.add_argument(
        "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold" , default=0.9 , type=__A , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
    parser.add_argument(
        "--masking_amount" , default=0.1 , type=__A , help="Amount to heads to masking at each masking step." )
    parser.add_argument("--metric_name" , default="acc" , type=__A , help="Metric to use for head masking." )
    parser.add_argument(
        "--max_seq_length" , default=128 , type=__A , help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ) , )
    parser.add_argument("--batch_size" , default=1 , type=__A , help="Batch size." )
    parser.add_argument("--seed" , type=__A , default=42 )
    parser.add_argument("--local_rank" , type=__A , default=-1 , help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip" , type=__A , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=__A , default="" , help="Can be used for distant debugging." )
    UpperCamelCase__ = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__A )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        UpperCamelCase__ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
        UpperCamelCase__ = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        UpperCamelCase__ = torch.device("cuda" , args.local_rank )
        UpperCamelCase__ = 1
        torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    UpperCamelCase__ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        UpperCamelCase__ = nn.parallel.DistributedDataParallel(
            __A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__A )
    elif args.n_gpu > 1:
        UpperCamelCase__ = nn.DataParallel(__A )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=__A )
    torch.save(__A , os.path.join(args.output_dir , "run_args.bin" ) )
    logger.info("Training/evaluation parameters %s" , __A )
    # Prepare dataset
    UpperCamelCase__ = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    UpperCamelCase__ = (torch.from_numpy(__A ),)
    UpperCamelCase__ = TensorDataset(*__A )
    UpperCamelCase__ = RandomSampler(__A )
    UpperCamelCase__ = DataLoader(__A , sampler=__A , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(__A , __A , __A )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        UpperCamelCase__ = mask_heads(__A , __A , __A )
        prune_heads(__A , __A , __A , __A )
| 80 |
'''simple docstring'''
# Lookup table: sum of squared decimal digits for every value in [0, 100000);
# originally named DIGITS_SQUARED (the helper below still refers to that name).
a__ : Union[str, Any] = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
# NOTE(review): these three bindings were originally CHAINS / True-sentinel /
# False-sentinel under distinct names; the automated rename binds them all to
# `a__`, so only the last assignment survives and `CHAINS` (used below) is
# undefined.
a__ : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a__ : Optional[Any] = True
a__ : Optional[Any] = False
def _UpperCamelCase ( __A ) -> bool:
    '''Memoised test of whether the square-digit chain starting at this value
    terminates at 89 (True) or at 1 (False), filling the CHAINS cache for the
    whole decade of the argument on the way.

    NOTE(review): the body references `number`, `CHAINS`, `chain` and
    `next_number`, all of which were renamed away (`__A`, `a__`,
    `_UpperCamelCase`); this function cannot run as written.
    '''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1] # type: ignore
    UpperCamelCase__ = chain(next_number(__A ) )
    UpperCamelCase__ = number_chain
    while number < 10000000:
        UpperCamelCase__ = number_chain
        number *= 10
    return number_chain
def _UpperCamelCase ( __A = 10000000 ) -> int:
    '''Project Euler 92: count starting numbers below the limit whose
    square-digit chain arrives at 89.

    NOTE(review): relies on `CHAINS`, `chain` and `number`, which exist only
    under renamed bindings in this file; restore the names before running.
    '''
    for i in range(1 , __A ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(__A )
if __name__ == "__main__":
    # Run doctests, then print the puzzle answer.
    # NOTE(review): `solution` is undefined here -- the function above was
    # renamed `_UpperCamelCase` by an automated pass.
    import doctest
    doctest.testmod()
    print(F"""{solution() = }""")
| 80 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the Bloom model package: the import structure is
# declared up front, optional groups are added only when their backends
# (tokenizers / torch) are available, and at runtime the module is replaced by
# a _LazyModule that defers the actual imports.
a__ : Any = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : Any = ['BloomTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : Optional[Any] = [
        'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BloomForCausalLM',
        'BloomModel',
        'BloomPreTrainedModel',
        'BloomForSequenceClassification',
        'BloomForTokenClassification',
        'BloomForQuestionAnswering',
    ]
# Under static type checking the real modules are imported directly so IDEs
# and mypy can resolve the names.
if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): the `_import_structure` argument is undefined here -- the
    # automated rename collapsed the structure dict above to `a__`.
    a__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _UpperCamelCase(tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs) -> None:
    """Precompute per-example token lengths for the train and val splits and
    save them to each dataset's ``len_file`` via ``pickle_save``.

    Args:
        tokenizer_name: model id or path for ``AutoTokenizer.from_pretrained``.
        data_dir: dataset directory handed to ``SeqaSeqDataset``.
        max_source_length: truncation length for source sequences.
        max_target_length: truncation length for target sequences.
        consider_target: if True, store ``max(src_len, tgt_len)`` per example
            instead of only the source length.
        **kwargs: forwarded to ``SeqaSeqDataset``.

    NOTE(review): the mangled original declared every parameter as ``__A``
    (not valid Python) and read undefined locals; the names above restore the
    upstream ``save_len_file`` interface -- confirm against callers.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # Lengths are counted as the number of non-pad tokens per row,
        # streamed in large batches for speed.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
    # NOTE(review): `save_len_file` is not defined in this file (the function
    # above is named `_UpperCamelCase`) -- mangled rename.
    fire.Fire(save_len_file)
| 80 | 1 |
"""Convert decimal numbers to hexadecimal string representations."""
# Lookup table mapping a remainder in [0, 15] to its hexadecimal digit.
# NOTE(review): bound to `a__` here but read as `values` in the function
# below -- mangled rename.
a__ : List[Any] = {
    0: '0',
    1: '1',
    2: '2',
    3: '3',
    4: '4',
    5: '5',
    6: '6',
    7: '7',
    8: '8',
    9: '9',
    1_0: 'a',
    1_1: 'b',
    1_2: 'c',
    1_3: 'd',
    1_4: 'e',
    1_5: 'f',
}
def _UpperCamelCase ( __A ) -> str:
'''simple docstring'''
assert type(__A ) in (int, float) and decimal == int(__A )
UpperCamelCase__ = int(__A )
UpperCamelCase__ = ""
UpperCamelCase__ = False
if decimal < 0:
UpperCamelCase__ = True
decimal *= -1
while decimal > 0:
UpperCamelCase__ , UpperCamelCase__ = divmod(__A , 16 )
UpperCamelCase__ = values[remainder] + hexadecimal
UpperCamelCase__ = "0x" + hexadecimal
if negative:
UpperCamelCase__ = "-" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
"""Fast (Rust-backed) tokenization class for the RoFormer model."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a__ : int = logging.get_logger(__name__)
# Resource names expected inside a pretrained tokenizer directory.
a__ : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
# Download URLs of the vocabulary files for the published checkpoints.
a__ : List[Any] = {
    'vocab_file': {
        'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_char_small': (
            'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_chinese_char_base': (
            'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_discriminator': (
            'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_generator': (
            'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
        ),
    }
}
# Maximum input sizes (in tokens) for each checkpoint.
a__ : Optional[Any] = {
    'junnyu/roformer_chinese_small': 1_5_3_6,
    'junnyu/roformer_chinese_base': 1_5_3_6,
    'junnyu/roformer_chinese_char_small': 5_1_2,
    'junnyu/roformer_chinese_char_base': 5_1_2,
    'junnyu/roformer_small_discriminator': 1_2_8,
    'junnyu/roformer_small_generator': 1_2_8,
}
# Per-checkpoint tokenizer init overrides.
# NOTE(review): all four constants above are bound to `a__`, yet the class
# below reads VOCAB_FILES_NAMES / PRETRAINED_* -- mangled renaming.
a__ : str = {
    'junnyu/roformer_chinese_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_base': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
    'junnyu/roformer_small_discriminator': {'do_lower_case': True},
    'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowercase_ ( a__ ):
    """Fast RoFormer tokenizer backed by HuggingFace `tokenizers`, using a
    Jieba-based pre-tokenizer for Chinese text.
    NOTE(review): the class attributes reference VOCAB_FILES_NAMES /
    PRETRAINED_* constants that are bound to `a__` above (mangled renames),
    and four methods below all share the name `__a` (after private-name
    mangling later definitions overwrite earlier ones) -- compare with the
    upstream RoFormerTokenizerFast before relying on this file.
    """
    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
    __UpperCAmelCase = RoFormerTokenizer
    def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ):
        super().__init__(
            a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
        # Rebuild the backend normalizer if its lowercase/strip_accents
        # options disagree with the requested ones.
        UpperCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("lowercase" , a ) != do_lower_case
            or pre_tok_state.get("strip_accents" , a ) != strip_accents
        ):
            UpperCamelCase__ = getattr(a , pre_tok_state.pop("type" ) )
            UpperCamelCase__ = do_lower_case
            UpperCamelCase__ = strip_accents
            UpperCamelCase__ = pre_tok_class(**a )
        UpperCamelCase__ = do_lower_case
    def __getstate__( self ):
        # The custom Jieba pre-tokenizer cannot be pickled; swap in a plain
        # BertPreTokenizer for serialization.
        UpperCamelCase__ = self.__dict__.copy()
        UpperCamelCase__ = BertPreTokenizer()
        return state
    def __setstate__( self , a ):
        # Restore state, then re-attach the Jieba pre-tokenizer built from the vocab.
        UpperCamelCase__ = d
        UpperCamelCase__ = self.__dict__["_tokenizer"].get_vocab()
        UpperCamelCase__ = PreTokenizer.custom(JiebaPreTokenizer(a ) )
    def __a ( self , a , a=None ):
        # Build [CLS] A [SEP] (B [SEP]) input ids with special tokens.
        UpperCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def __a ( self , a , a = None ):
        # Token type ids: zeros for the first segment (and its separators),
        # ones for the optional second segment.
        UpperCamelCase__ = [self.sep_token_id]
        UpperCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def __a ( self , a , a = None ):
        # Save the backend vocabulary files to the given directory.
        UpperCamelCase__ = self._tokenizer.model.save(a , name=a )
        return tuple(a )
    def __a ( self , a , a=None , a=None , a=False , **a , ):
        # Swap to a picklable pre-tokenizer before delegating to the parent save.
        UpperCamelCase__ = BertPreTokenizer()
        return super().save_pretrained(a , a , a , a , **a )
| 80 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase():
    """Download the demo "Merlion" image used for conversion sanity checks.

    Returns the image as an RGB ``PIL.Image``.
    """
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    # Bug fix: the original called requests.get(..., stream=__A) where `__A`
    # is undefined at module scope; the response body must be streamed so
    # `.raw` is readable (stream=True).
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def _UpperCamelCase ( __A ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def _UpperCamelCase ( __A , __A , __A ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = dct.pop(__A )
UpperCamelCase__ = val
def _UpperCamelCase ( __A , __A ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCamelCase__ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCamelCase__ = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCamelCase__ = torch.cat((q_bias, torch.zeros_like(__A , requires_grad=__A ), v_bias) )
UpperCamelCase__ = qkv_bias
def _UpperCamelCase(model_name, eos_token_id):
    """Assemble the HF BLIP-2 config for the given LAVIS checkpoint name.

    Returns:
        ``(config, image_size)``: COCO-finetuned checkpoints use 364px
        inputs, all others 224px.

    NOTE(review): the mangled original declared both parameters as ``__A``
    (not valid Python); the caller passes ``eos_token_id`` by keyword, so the
    names above restore the upstream signature.
    """
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def _UpperCamelCase ( __A , __A=None , __A=False ) -> Any:
    """Convert a LAVIS BLIP-2 checkpoint into the HF format, verify logits and
    a generated caption against the original model, then optionally save the
    converted weights and push them to the hub.
    NOTE(review): the signature declares ``__A`` three times (not valid
    Python) and the body reads `model_name`, `pytorch_dump_folder_path` and
    `push_to_hub`, which are never bound -- mechanically mangled renaming of
    the upstream `convert_blip2_checkpoint(model_name,
    pytorch_dump_folder_path=None, push_to_hub=False)`; restore the real
    parameter names before running.
    """
    # Tokenizer of the language backbone (OPT vs Flan-T5 families).
    UpperCamelCase__ = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b" )
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl" )
    )
    UpperCamelCase__ = tokenizer("\n" , add_special_tokens=__A ).input_ids[0]
    UpperCamelCase__ , UpperCamelCase__ = get_blipa_config(__A , eos_token_id=__A )
    # Fresh HF model in eval mode; weights are filled in below.
    UpperCamelCase__ = BlipaForConditionalGeneration(__A ).eval()
    # HF model name -> (LAVIS model name, LAVIS model type).
    UpperCamelCase__ = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    UpperCamelCase__ , UpperCamelCase__ = model_name_to_original[model_name]
    # load original model
    print("Loading original model..." )
    UpperCamelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
    UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = load_model_and_preprocess(
        name=__A , model_type=__A , is_eval=__A , device=__A )
    original_model.eval()
    print("Done!" )
    # update state dict keys
    UpperCamelCase__ = original_model.state_dict()
    UpperCamelCase__ = create_rename_keys(__A )
    for src, dest in rename_keys:
        rename_key(__A , __A , __A )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        UpperCamelCase__ = state_dict.pop(__A )
        if key.startswith("Qformer.bert" ):
            UpperCamelCase__ = key.replace("Qformer.bert" , "qformer" )
        if "attention.self" in key:
            UpperCamelCase__ = key.replace("self" , "attention" )
        if "opt_proj" in key:
            UpperCamelCase__ = key.replace("opt_proj" , "language_projection" )
        if "t5_proj" in key:
            UpperCamelCase__ = key.replace("t5_proj" , "language_projection" )
        if key.startswith("opt" ):
            UpperCamelCase__ = key.replace("opt" , "language" )
        if key.startswith("t5" ):
            UpperCamelCase__ = key.replace("t5" , "language" )
        UpperCamelCase__ = val
    # read in qv biases
    read_in_q_v_bias(__A , __A )
    # Non-strict load: only the qformer position ids should be unexpected.
    UpperCamelCase__ , UpperCamelCase__ = hf_model.load_state_dict(__A , strict=__A )
    assert len(__A ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    UpperCamelCase__ = load_demo_image()
    UpperCamelCase__ = vis_processors["eval"](__A ).unsqueeze(0 ).to(__A )
    UpperCamelCase__ = tokenizer(["\n"] , return_tensors="pt" ).input_ids.to(__A )
    # create processor
    UpperCamelCase__ = BlipImageProcessor(
        size={"height": image_size, "width": image_size} , image_mean=__A , image_std=__A )
    UpperCamelCase__ = BlipaProcessor(image_processor=__A , tokenizer=__A )
    UpperCamelCase__ = processor(images=__A , return_tensors="pt" ).pixel_values.to(__A )
    # make sure processor creates exact same pixel values
    assert torch.allclose(__A , __A )
    original_model.to(__A )
    hf_model.to(__A )
    with torch.no_grad():
        if "opt" in model_name:
            UpperCamelCase__ = original_model({"image": original_pixel_values, "text_input": [""]} ).logits
            UpperCamelCase__ = hf_model(__A , __A ).logits
        else:
            UpperCamelCase__ = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]} ).logits
            UpperCamelCase__ = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            UpperCamelCase__ = hf_model(__A , __A , labels=__A ).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:" , original_logits[0, :3, :3] )
    print("First values of HF logits:" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        UpperCamelCase__ = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=__A )
        assert torch.allclose(logits[0, :3, :3] , __A , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        UpperCamelCase__ = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__A )
    else:
        # cast to same type
        UpperCamelCase__ = logits.dtype
        assert torch.allclose(original_logits.to(__A ) , __A , atol=1E-2 )
    print("Looks ok!" )
    print("Generating a caption..." )
    UpperCamelCase__ = ""
    UpperCamelCase__ = tokenizer(__A , return_tensors="pt" ).input_ids.to(__A )
    UpperCamelCase__ = original_model.generate({"image": original_pixel_values} )
    UpperCamelCase__ = hf_model.generate(
        __A , __A , do_sample=__A , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("Original generation:" , __A )
    # Strip the prompt tokens from the generated ids before decoding.
    UpperCamelCase__ = input_ids.shape[1]
    UpperCamelCase__ = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__A )
    UpperCamelCase__ = [text.strip() for text in output_text]
    print("HF generation:" , __A )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(__A )
        hf_model.save_pretrained(__A )
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    a__ : Dict = argparse.ArgumentParser()
    # Names of the LAVIS checkpoints this script can convert.
    # NOTE(review): throughout this block results are bound to `a__`, yet the
    # code reads `choices`, `parser` and `args` -- mangled renaming of the
    # upstream argparse setup; it also calls `convert_blipa_checkpoint`,
    # which is not defined here (the function above is `_UpperCamelCase`).
    a__ : List[Any] = [
        'blip2-opt-2.7b',
        'blip2-opt-6.7b',
        'blip2-opt-2.7b-coco',
        'blip2-opt-6.7b-coco',
        'blip2-flan-t5-xl',
        'blip2-flan-t5-xl-coco',
        'blip2-flan-t5-xxl',
    ]
    parser.add_argument(
        '--model_name',
        default='blip2-opt-2.7b',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert',
    )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting',
    )
    a__ : Union[str, Any] = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 80 |
"""Tokenization classes for ESM protein language models."""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
# Resource names expected inside a pretrained tokenizer directory.
a__ : Optional[int] = {'vocab_file': 'vocab.txt'}
# Download URLs of the vocab files for the published ESM-2 checkpoints.
# NOTE(review): these constants are bound to `a__` but read below as
# VOCAB_FILES_NAMES / PRETRAINED_* -- mangled renaming.
a__ : Optional[Any] = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}
# Maximum input lengths (in tokens) for the published checkpoints.
a__ : Optional[int] = {
    'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
    'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def _UpperCamelCase ( __A ) -> str:
'''simple docstring'''
with open(__A , "r" ) as f:
UpperCamelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class lowercase_ ( a__ ):
    """Character-level tokenizer for ESM protein language models: the
    vocabulary is read from a plain-text file and sequences are split on
    whitespace.
    NOTE(review): most methods below share the (private, name-mangled) name
    `__a`, so later definitions overwrite earlier ones, and the class
    attributes reference VOCAB_FILES_NAMES / PRETRAINED_* constants bound to
    `a__` above -- compare with the upstream EsmTokenizer.
    """
    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase = ['input_ids', 'attention_mask']
    def __init__( self , a , a="<unk>" , a="<cls>" , a="<pad>" , a="<mask>" , a="<eos>" , **a , ):
        super().__init__(**a )
        # Load the vocab and build token<->id lookup tables.
        UpperCamelCase__ = load_vocab_file(a )
        UpperCamelCase__ = dict(enumerate(self.all_tokens ) )
        UpperCamelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        UpperCamelCase__ = unk_token
        UpperCamelCase__ = cls_token
        UpperCamelCase__ = pad_token
        UpperCamelCase__ = mask_token
        UpperCamelCase__ = eos_token
        UpperCamelCase__ = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )
    def __a ( self , a ):
        # id -> token (falls back to the unk token).
        return self._id_to_token.get(a , self.unk_token )
    def __a ( self , a ):
        # token -> id (falls back to the unk token's id).
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )
    def __a ( self , a , **a ):
        # Whitespace tokenization (protein residues are space-separated upstream).
        return text.split()
    def __a ( self , a=False ):
        return len(self._id_to_token )
    def __a ( self ):
        return {token: i for i, token in enumerate(self.all_tokens )}
    def __a ( self , a ):
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )
    def __a ( self , a ):
        return self._id_to_token.get(a , self.unk_token )
    def __a ( self , a , a = None ):
        # Wrap the sequence(s) as <cls> A <eos> (B <eos>).
        UpperCamelCase__ = [self.cls_token_id]
        UpperCamelCase__ = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_a + sep + token_ids_a + sep  # Multiple inputs always have an EOS token
    def __a ( self , a , a = None , a = False ):
        # Build the special-tokens mask (1 for special tokens, 0 otherwise).
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        UpperCamelCase__ = [1] + ([0] * len(a )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(a ) + [1]
        return mask
    def __a ( self , a , a ):
        # Write the vocabulary to disk, one token per line.
        UpperCamelCase__ = os.path.join(a , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(a , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)
    @property
    def __a ( self ):
        return self.get_vocab_size(with_added_tokens=a )
    def __a ( self , a , a = False ):
        return super()._add_tokens(a , special_tokens=a )
| 80 | 1 |
"""DPR model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : List[Any] = logging.get_logger(__name__)
# Map of released DPR checkpoints to their hosted config files.
a__ : int = {
    'facebook/dpr-ctx_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-single-nq-base': (
        'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-ctx_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-multiset-base': (
        'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
    ),
}
class lowercase_(a__):
    """Configuration for DPR encoders/readers: BERT-style hyperparameters
    plus an optional output projection dimension.

    NOTE(review): the mangled original declared every __init__ parameter as
    ``a`` (not valid Python) and assigned each value to the same throwaway
    local instead of `self`; the names below restore the upstream DPRConfig
    signature -- confirm against callers.
    """

    __UpperCAmelCase = 'dpr'

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # Store every hyperparameter on the instance so it is serialized by
        # the base PretrainedConfig machinery.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Dimension of the optional projection on top of the pooled output
        # (0 disables the projection).
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 80 |
'''simple docstring'''
from math import factorial, pi
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__A ) )
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__A ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `maclaurin_sin` / `maclaurin_cos` are not defined in this
    # file -- both functions above were renamed to `_UpperCamelCase`, so the
    # second definition shadows the first.
    print(maclaurin_sin(1_0))
    print(maclaurin_sin(-1_0))
    print(maclaurin_sin(1_0, 1_5))
    print(maclaurin_sin(-1_0, 1_5))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(1_0, 1_5))
    print(maclaurin_cos(-1_0, 1_5))
| 80 | 1 |
"""TimmBackbone model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)  # module-level logger
class lowercase_(a__):
    """Configuration for a backbone wrapped from the `timm` library.

    NOTE(review): the mangled original declared several __init__ parameters
    all named ``a`` (not valid Python) and assigned the values to a throwaway
    local instead of `self`; the names below restore the upstream
    TimmBackboneConfig signature -- confirm against callers.
    """

    __UpperCAmelCase = 'timm_backbone'

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone  # timm model identifier
        self.num_channels = num_channels
        self.features_only = features_only  # expose feature maps only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True  # marker distinguishing timm-backed configs
        # Default to the last feature map when no explicit indices are given.
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 80 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class lowercase_(a__):
    """Reader that materializes a SQL query or table into a `Dataset`.

    NOTE(review): the mangled original declared duplicate ``a`` parameters
    (not valid Python) and never stored the builder on `self`; the names
    below restore the upstream SqlDatasetReader -- confirm against callers.
    """

    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        # The Sql builder does the actual query execution / Arrow conversion.
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def __a(self):
        """Prepare the Sql builder and return the resulting "train" split."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class lowercase_ :
    """Writer that streams a `Dataset` into a SQL table in batches, either
    sequentially or through a multiprocessing pool.
    NOTE(review): the __init__ below declares several parameters all named
    `a` (not valid Python) and the three methods share the name `__a` --
    mechanically mangled from the upstream SqlDatasetWriter (write /
    _batch_sql / _write); compare before relying on this file.
    """
    def __init__( self , a , a , a , a = None , a = None , **a , ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
        UpperCamelCase__ = dataset
        UpperCamelCase__ = name
        UpperCamelCase__ = con
        UpperCamelCase__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        UpperCamelCase__ = num_proc
        UpperCamelCase__ = to_sql_kwargs
    def __a ( self ):
        # Strip arguments managed by this writer before delegating to
        # pandas.DataFrame.to_sql.
        UpperCamelCase__ = self.to_sql_kwargs.pop("sql" , a )
        UpperCamelCase__ = self.to_sql_kwargs.pop("con" , a )
        UpperCamelCase__ = self.to_sql_kwargs.pop("index" , a )
        UpperCamelCase__ = self._write(index=a , **self.to_sql_kwargs )
        return written
    def __a ( self , a ):
        # Write one slice of the dataset; append after the first batch so the
        # table is created exactly once.
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = args
        UpperCamelCase__ = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        UpperCamelCase__ = query_table(
            table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
        UpperCamelCase__ = batch.to_pandas()
        UpperCamelCase__ = df.to_sql(self.name , self.con , index=a , **a )
        return num_rows or len(a )
    def __a ( self , a , **a ):
        # Sequential path when num_proc is unset/1; otherwise fan batches out
        # over a process pool, accumulating the number of written rows.
        UpperCamelCase__ = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            UpperCamelCase__ , UpperCamelCase__ = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
                    written += num_rows
        return written
| 80 | 1 |
"""TAPAS model configuration."""
from ...configuration_utils import PretrainedConfig
# Map of released TAPAS checkpoints to their hosted config files.
a__ : Dict = {
    'google/tapas-base-finetuned-sqa': (
        'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wtq': (
        'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-wikisql-supervised': (
        'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
    ),
    'google/tapas-base-finetuned-tabfact': (
        'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
    ),
}
class lowercase_ ( a__ ):
    """Configuration for TAPAS (a BERT encoder plus table question-answering
    heads).
    NOTE(review): the __init__ below declares every parameter as ``a``
    (not valid Python) and stores each value into the same throwaway local
    instead of `self` -- mechanically mangled from the upstream TapasConfig;
    the inline comments mark the upstream hyperparameter groups.
    """
    __UpperCAmelCase = 'tapas'
    def __init__( self , a=3_05_22 , a=7_68 , a=12 , a=12 , a=30_72 , a="gelu" , a=0.1 , a=0.1 , a=10_24 , a=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , a=0.02 , a=1e-12 , a=0 , a=10.0 , a=0 , a=1.0 , a=None , a=1.0 , a=False , a=None , a=1.0 , a=1.0 , a=False , a=False , a="ratio" , a=None , a=None , a=64 , a=32 , a=False , a=True , a=False , a=False , a=True , a=False , a=None , a=None , **a , ):
        super().__init__(pad_token_id=a , **a )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        UpperCamelCase__ = vocab_size
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = max_position_embeddings
        UpperCamelCase__ = type_vocab_sizes
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = layer_norm_eps
        # Fine-tuning task hyperparameters
        UpperCamelCase__ = positive_label_weight
        UpperCamelCase__ = num_aggregation_labels
        UpperCamelCase__ = aggregation_loss_weight
        UpperCamelCase__ = use_answer_as_supervision
        UpperCamelCase__ = answer_loss_importance
        UpperCamelCase__ = use_normalized_answer_loss
        UpperCamelCase__ = huber_loss_delta
        UpperCamelCase__ = temperature
        UpperCamelCase__ = aggregation_temperature
        UpperCamelCase__ = use_gumbel_for_cells
        UpperCamelCase__ = use_gumbel_for_aggregation
        UpperCamelCase__ = average_approximation_function
        UpperCamelCase__ = cell_selection_preference
        UpperCamelCase__ = answer_loss_cutoff
        UpperCamelCase__ = max_num_rows
        UpperCamelCase__ = max_num_columns
        UpperCamelCase__ = average_logits_per_cell
        UpperCamelCase__ = select_one_column
        UpperCamelCase__ = allow_empty_column_selection
        UpperCamelCase__ = init_cell_selection_weights_to_zero
        UpperCamelCase__ = reset_position_index_per_cell
        UpperCamelCase__ = disable_per_token_loss
        # Aggregation hyperparameters
        UpperCamelCase__ = aggregation_labels
        UpperCamelCase__ = no_aggregation_label_index
        # Allow JSON configs to carry string keys for the aggregation labels.
        if isinstance(self.aggregation_labels , a ):
            UpperCamelCase__ = {int(a ): v for k, v in aggregation_labels.items()}
| 80 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Ordered list of torch.compile / dynamo backend names offered in the menu.
# NOTE(review): bound to `a__` here but read below as DYNAMO_BACKENDS
# (mangled rename).
a__ : Any = [
    'EAGER',
    'AOT_EAGER',
    'INDUCTOR',
    'NVFUSER',
    'AOT_NVFUSER',
    'AOT_CUDAGRAPHS',
    'OFI',
    'FX2TRT',
    'ONNXRT',
    'IPEX',
]
def _UpperCamelCase ( __A , __A=None , __A=None , __A=None ) -> int:
'''simple docstring'''
UpperCamelCase__ = True
while ask_again:
UpperCamelCase__ = input(__A )
try:
if default is not None and len(__A ) == 0:
return default
return convert_value(__A ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__A )
def _UpperCamelCase(input_text, options=None, convert_value=None, default_choice=0):
    """Show a bullet menu of ``options`` and return the (optionally
    converted) selection.

    NOTE(review): parameter names restored from the upstream ``_ask_options``
    (the mangled original declared four parameters all named ``__A``).  The
    upstream mutable default ``options=[]`` is replaced by the None sentinel
    with identical behavior.
    """
    menu_options = [] if options is None else options
    menu = BulletMenu(input_text, menu_options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _UpperCamelCase ( __A ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = int(__A )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def _UpperCamelCase ( __A ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = int(__A )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def _UpperCamelCase ( __A ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = int(__A )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _UpperCamelCase ( __A ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(__A )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def _UpperCamelCase ( __A ) -> Any:
    """Map a menu index to a SageMakerDistributedType member."""
    # The original indexed with `value`, which was never bound; bind it.
    value = int(__A )
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def _UpperCamelCase ( __A ) -> Dict:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class lowercase_ ( argparse.RawDescriptionHelpFormatter ):
    """Help formatter that hides the generic `<command> [<args>]` stub in usage lines."""

    # NOTE(review): the original signature repeated the name `a` four times,
    # which is a SyntaxError; restored argparse's _format_usage parameter order
    # (usage, actions, groups, prefix).
    def __a ( self , usage , actions , groups , prefix ):
        formatted = super()._format_usage(usage , actions , groups , prefix )
        return formatted.replace("<command> [<args>] " , "" )
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
if not nums:
return 0
UpperCamelCase__ = nums[0]
UpperCamelCase__ = 0
for num in nums[1:]:
UpperCamelCase__ , UpperCamelCase__ = (
max_excluding + num,
max(__A , __A ),
)
return max(__A , __A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
if resistor <= 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__A )
first_sum += 1 / float(__A )
index += 1
return 1 / first_sum
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative value!'''
raise ValueError(__A )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> str:
'''simple docstring'''
if number > 0:
raise ValueError("input must be a negative integer" )
UpperCamelCase__ = len(bin(__A )[3:] )
UpperCamelCase__ = bin(abs(__A ) - (1 << binary_number_length) )[3:]
UpperCamelCase__ = (
(
"1"
+ "0" * (binary_number_length - len(__A ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowercase_ ( enum.Enum ):
    """Output modes of the text-generation pipeline (cf. transformers' ReturnType)."""

    # The original bound all three values to the same name, which enum.Enum
    # rejects (duplicate key -> TypeError). Member names restored to the ones
    # referenced later in this file (ReturnType.TENSORS / NEW_TEXT / FULL_TEXT).
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(a__ )
class lowercase_ ( a__ ):
    # NOTE(review): obfuscated variant of transformers' TextGenerationPipeline.
    # Many results below are bound to the throwaway local `UpperCamelCase__`
    # while later lines read the upstream names (`prefix`, `preprocess_params`,
    # `stop_sequence_ids`, `ReturnType.*`, `self.XL_PREFIX`, `inputs`, ...) —
    # reconcile against the upstream implementation before relying on behavior.
    # The long string below is presumably the XL_PREFIX prompt prepended for
    # XLNet/Transfo-XL models — TODO confirm.
    __UpperCAmelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '

    # Constructor: checks the model type and pre-computes prefix-dependent
    # preprocess/forward parameters.
    def __init__( self , *a , **a ):
        super().__init__(*a , **a )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            UpperCamelCase__ = None
            if self.model.config.prefix is not None:
                UpperCamelCase__ = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                UpperCamelCase__ = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._sanitize_parameters(prefix=a , **self._forward_params )
                UpperCamelCase__ = {**self._preprocess_params, **preprocess_params}
                UpperCamelCase__ = {**self._forward_params, **forward_params}

    # Splits the caller's kwargs into preprocess / forward (generate) /
    # postprocess parameter dicts (upstream: _sanitize_parameters).
    # NOTE(review): every parameter is named `a` here — duplicate parameter
    # names are a SyntaxError and need distinct names.
    def __a ( self , a=None , a=None , a=None , a=None , a=None , a=None , a=None , a=None , **a , ):
        UpperCamelCase__ = {}
        if prefix is not None:
            UpperCamelCase__ = prefix
        if prefix:
            UpperCamelCase__ = self.tokenizer(
                a , padding=a , add_special_tokens=a , return_tensors=self.framework )
            UpperCamelCase__ = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    " [None, 'hole']" )
            UpperCamelCase__ = handle_long_generation
        preprocess_params.update(a )
        UpperCamelCase__ = generate_kwargs
        UpperCamelCase__ = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            UpperCamelCase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            UpperCamelCase__ = ReturnType.TENSORS
        if return_type is not None:
            UpperCamelCase__ = return_type
        if clean_up_tokenization_spaces is not None:
            UpperCamelCase__ = clean_up_tokenization_spaces
        if stop_sequence is not None:
            UpperCamelCase__ = self.tokenizer.encode(a , add_special_tokens=a )
            if len(a ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            UpperCamelCase__ = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    # Transfo-XL needs an extra tokenizer flag before delegating to the base
    # class (upstream: _parse_and_tokenize).
    def __a ( self , *a , **a ):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True} )
        return super()._parse_and_tokenize(*a , **a )

    # Entry point: defer to the base Pipeline __call__.
    def __call__( self , a , **a ):
        return super().__call__(a , **a )

    # Tokenizes prefix + prompt; "hole" mode truncates the prompt so that the
    # requested number of new tokens fits the model's max length (upstream:
    # preprocess).
    def __a ( self , a , a="" , a=None , **a ):
        UpperCamelCase__ = self.tokenizer(
            prefix + prompt_text , padding=a , add_special_tokens=a , return_tensors=self.framework )
        UpperCamelCase__ = prompt_text
        if handle_long_generation == "hole":
            UpperCamelCase__ = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                UpperCamelCase__ = generate_kwargs["max_new_tokens"]
            else:
                UpperCamelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected" )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                UpperCamelCase__ = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length" )
                UpperCamelCase__ = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    UpperCamelCase__ = inputs["attention_mask"][:, -keep_length:]
        return inputs

    # Runs model.generate, adjusting max/min length for any prefix and
    # reshaping the output to (batch, num_return_sequences, ...) (upstream:
    # _forward).
    def __a ( self , a , **a ):
        UpperCamelCase__ = model_inputs["input_ids"]
        UpperCamelCase__ = model_inputs.get("attention_mask" , a )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            UpperCamelCase__ = None
            UpperCamelCase__ = None
            UpperCamelCase__ = 1
        else:
            UpperCamelCase__ = input_ids.shape[0]
        UpperCamelCase__ = model_inputs.pop("prompt_text" )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        UpperCamelCase__ = generate_kwargs.pop("prefix_length" , 0 )
        if prefix_length > 0:
            UpperCamelCase__ = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                UpperCamelCase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            UpperCamelCase__ = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        UpperCamelCase__ = self.model.generate(input_ids=a , attention_mask=a , **a )
        UpperCamelCase__ = generated_sequence.shape[0]
        if self.framework == "pt":
            UpperCamelCase__ = generated_sequence.reshape(a , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            UpperCamelCase__ = tf.reshape(a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    # Decodes generated ids into token ids / new text / full text records
    # depending on return_type (upstream: postprocess).
    def __a ( self , a , a=ReturnType.FULL_TEXT , a=True ):
        UpperCamelCase__ = model_outputs["generated_sequence"][0]
        UpperCamelCase__ = model_outputs["input_ids"]
        UpperCamelCase__ = model_outputs["prompt_text"]
        UpperCamelCase__ = generated_sequence.numpy().tolist()
        UpperCamelCase__ = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                UpperCamelCase__ = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                UpperCamelCase__ = self.tokenizer.decode(
                    a , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    UpperCamelCase__ = 0
                else:
                    UpperCamelCase__ = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , ) )
                if return_type == ReturnType.FULL_TEXT:
                    UpperCamelCase__ = prompt_text + text[prompt_length:]
                else:
                    UpperCamelCase__ = text[prompt_length:]
                UpperCamelCase__ = {"generated_text": all_text}
            records.append(a )
        return records
| 80 | 1 |
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Comparison operators keyed by their pip-requirement spelling; used by the
# version checks below to evaluate e.g. "tokenizers>=0.9.4".
a__ : Tuple = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
}
def _UpperCamelCase(op, got_ver, want_ver, requirement, pkg, hint) -> List[str]:
    """Compare an installed version against a wanted one with operator `op`.

    Raises ValueError when either version is missing and ImportError when the
    comparison fails; `hint` is appended to the error message.
    """
    # NOTE(review): the original declared six parameters all named `__A`
    # (a SyntaxError); names restored from the message bodies below.
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    # `ops` and `version` (packaging) are module-level names in this file.
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def _UpperCamelCase ( __A , __A = None ) -> None:
'''simple docstring'''
UpperCamelCase__ = F'''\n{hint}''' if hint is not None else ""
# non-versioned check
if re.match(R"^[\w_\-\d]+$" , __A ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = requirement, None, None
else:
UpperCamelCase__ = re.findall(R"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , __A )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
F''' got {requirement}''' )
UpperCamelCase__ , UpperCamelCase__ = match[0]
UpperCamelCase__ = want_full.split("," ) # there could be multiple requirements
UpperCamelCase__ = {}
for w in want_range:
UpperCamelCase__ = re.findall(R"^([\s!=<>]{1,2})(.+)" , __A )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
F''' but got {requirement}''' )
UpperCamelCase__ , UpperCamelCase__ = match[0]
UpperCamelCase__ = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
UpperCamelCase__ = ".".join([str(__A ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(__A , __A , __A , __A , __A , __A )
return
# check if any version is installed
try:
UpperCamelCase__ = importlib.metadata.version(__A )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(__A , __A , __A , __A , __A , __A )
def _UpperCamelCase ( __A ) -> Union[str, Any]:
    """require_version wrapper that appends a transformers-specific install hint."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    # The original passed the requirement twice, dropping the hint entirely.
    return require_version(__A , hint )
| 80 |
'''simple docstring'''
from ....utils import logging
# Module-level logger for this configuration module.
a__ : Optional[Any] = logging.get_logger(__name__)
class lowercase_ ( a__ ):
    """Wraps a text-model config and grafts modal settings onto it (cf. MMBTConfig)."""

    # NOTE(review): the original repeated the parameter name `a` (a
    # SyntaxError) and bound everything to a dead local instead of `self`.
    def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ):
        # Adopt every attribute of the wrapped config wholesale.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A ) -> bool:
    """Drive the rat-in-maze solver over square maze `__A`.

    Prints the solution grid (or a failure message) and returns solvability.
    """
    size = len(__A )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    # NOTE(review): `run_maze` is the upstream helper's name; in this file the
    # DFS below was also renamed, so this reference needs reconciling.
    solved = run_maze(__A , 0 , 0 , solutions )
    if solved:
        # The original stringified the whole maze once per row.
        print("\n".join(str(row ) for row in solutions ) )
    else:
        print("No solution exists!" )
    return solved
def _UpperCamelCase ( __A , __A , __A , __A ) -> bool:
'''simple docstring'''
UpperCamelCase__ = len(__A )
# Final check point.
if i == j == (size - 1):
UpperCamelCase__ = 1
return True
UpperCamelCase__ = (not i < 0) and (not j < 0) # Check lower bounds
UpperCamelCase__ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
UpperCamelCase__ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
UpperCamelCase__ = 1
# check for directions
if (
run_maze(__A , i + 1 , __A , __A )
or run_maze(__A , __A , j + 1 , __A )
or run_maze(__A , i - 1 , __A , __A )
or run_maze(__A , __A , j - 1 , __A )
):
return True
UpperCamelCase__ = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
# Random browser User-Agent so Instagram serves the full HTML profile page.
a__ : Tuple = {'UserAgent': UserAgent().random}
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
UpperCamelCase__ = script.contents[0]
UpperCamelCase__ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase_ :
    """Scrape the public profile information of an Instagram account.

    NOTE(review): the obfuscated original bound state to dead locals, reused
    the mangled method name `__a` for every property, and referenced unbound
    names; self-assignments and the property names read by the smoke test
    below are restored.
    """

    def __init__( self , username ):
        self.url = f'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()

    def get_json( self ):
        """Fetch the profile page and extract the user dict from its scripts."""
        # `a__` is this module's shared headers constant (random User-Agent).
        html = requests.get(self.url , headers=a__ ).text
        scripts = BeautifulSoup(html , "html.parser" ).find_all("script" )
        # The index of the sharedData script varies between page variants.
        # NOTE(review): `extract_user_profile` is the upstream helper's name;
        # the helper above was renamed by obfuscation — reconcile.
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )

    def __repr__( self ):
        return f'''{self.__class__.__name__}(\'{self.username}\')'''

    def __str__( self ):
        return f'''{self.fullname} ({self.username}) is {self.biography}'''

    @property
    def username( self ):
        return self.user_data["username"]

    @property
    def fullname( self ):
        return self.user_data["full_name"]

    @property
    def biography( self ):
        return self.user_data["biography"]

    @property
    def email( self ):
        return self.user_data["business_email"]

    @property
    def website( self ):
        return self.user_data["external_url"]

    @property
    def number_of_followers( self ):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings( self ):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url( self ):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified( self ):
        return self.user_data["is_verified"]

    @property
    def is_private( self ):
        return self.user_data["is_private"]
def _UpperCamelCase ( __A = "github" ) -> None:
    """Live smoke test of the Instagram scraper; skipped when running on CI.

    The original bound the scraper to a dead local, read the unbound names
    `instagram_user`/`username`, and checked isinstance against a string.
    """
    import os

    if os.environ.get("CI" ):
        return  # test failing on GitHub Actions
    # `lowercase_` is the scraper class defined above in this file.
    instagram_user = lowercase_(__A )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
    assert instagram_user.username == __A
    if __A != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram." )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the scraper instance is bound to `a__` but every print
    # below reads `instagram_user`, and `InstagramUser` is not defined in this
    # file (the class above was renamed) — these names need reconciling.
    a__ : Any = InstagramUser('github')
    print(instagram_user)
    print(F"""{instagram_user.number_of_posts = }""")
    print(F"""{instagram_user.number_of_followers = }""")
    print(F"""{instagram_user.number_of_followings = }""")
    print(F"""{instagram_user.email = }""")
    print(F"""{instagram_user.website = }""")
    print(F"""{instagram_user.profile_picture_url = }""")
    print(F"""{instagram_user.is_verified = }""")
    print(F"""{instagram_user.is_private = }""")
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A , __A , __A ) -> int:
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
UpperCamelCase__ = _modexpt(__A , exponent // 2 , __A ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__A , exponent - 1 , __A )) % modulo_value
def _UpperCamelCase ( __A = 1777 , __A = 1855 , __A = 8 ) -> int:
'''simple docstring'''
UpperCamelCase__ = base
for _ in range(1 , __A ):
UpperCamelCase__ = _modexpt(__A , __A , 10**digits )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 80 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A , __A , __A ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a__ : List[Any] = abspath(join(dirname(dirname(__file__)), 'src'))
# NOTE(review): the path is bound to `a__` above but inserted as
# `git_repo_path` here — one of the two names is wrong.
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def _UpperCamelCase ( __A ) -> Any:
    """Forward pytest's option registration to the shared diffusers helper."""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(__A )
def _UpperCamelCase ( __A ) -> Dict:
'''simple docstring'''
from diffusers.utils.testing_utils import pytest_terminal_summary_main
UpperCamelCase__ = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__A , id=__A )
| 80 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the MBart model family: the import structure is
# declared up front and nothing heavy is imported until an attribute is
# actually accessed (see _LazyModule at the bottom).
a__ : Union[str, Any] = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}

# Each optional-dependency group below registers its exports only when the
# dependency is installed; a missing dependency is silently skipped.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : int = ['MBartTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : List[Any] = ['MBartTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : List[str] = [
        'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MBartForCausalLM',
        'MBartForConditionalGeneration',
        'MBartForQuestionAnswering',
        'MBartForSequenceClassification',
        'MBartModel',
        'MBartPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : List[str] = [
        'TFMBartForConditionalGeneration',
        'TFMBartModel',
        'TFMBartPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__ : str = [
        'FlaxMBartForConditionalGeneration',
        'FlaxMBartForQuestionAnswering',
        'FlaxMBartForSequenceClassification',
        'FlaxMBartModel',
        'FlaxMBartPreTrainedModel',
    ]

# Under TYPE_CHECKING the real symbols are imported eagerly so static
# analyzers see them; at runtime the module is replaced by a _LazyModule.
if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): upstream accumulates the groups above into a single
    # `_import_structure` dict; here every group was rebound to `a__`, so the
    # `_import_structure` reference below is unbound — reconcile the names.
    a__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger and the map of pretrained checkpoint names to config URLs.
a__ : List[str] = logging.get_logger(__name__)

a__ : Optional[int] = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class lowercase_ ( a__ ):
    """Configuration for the DETA object-detection model.

    Stores encoder/decoder dimensions, deformable-attention settings, and the
    Hungarian-matcher / loss coefficients, plus a nested backbone config.

    NOTE(review): the obfuscated original duplicated one class-attribute name
    for both `model_type` and `attribute_map`, repeated the parameter name `a`
    throughout __init__ (a SyntaxError), lost every self-assignment to a dead
    local, and defined three methods all named `__a`; all restored here.
    """

    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__( self , backbone_config=None , num_queries=9_00 , max_position_embeddings=20_48 , encoder_layers=6 , encoder_ffn_dim=20_48 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=10_24 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_56 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=3_00 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            # Accept either a config object or a plain dict describing one.
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads( self ):
        """Alias consumed through attribute_map; equals encoder_attention_heads."""
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        """Alias consumed through attribute_map; equals d_model."""
        return self.d_model

    def to_dict( self ):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCamelCase ( __A , __A ) -> list:
'''simple docstring'''
if len(__A ) != 2 or len(a[0] ) != 2 or len(__A ) != 2 or len(b[0] ) != 2:
raise Exception("Matrices are not 2x2" )
UpperCamelCase__ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def _UpperCamelCase ( __A , __A ) -> str:
'''simple docstring'''
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__A ) )
]
def _UpperCamelCase ( __A , __A ) -> Union[str, Any]:
'''simple docstring'''
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(__A ) )
]
def _UpperCamelCase ( __A ) -> tuple[list, list, list, list]:
'''simple docstring'''
if len(__A ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception("Odd matrices are not supported!" )
UpperCamelCase__ = len(__A )
UpperCamelCase__ = matrix_length // 2
UpperCamelCase__ = [[a[i][j] for j in range(__A , __A )] for i in range(__A )]
UpperCamelCase__ = [
[a[i][j] for j in range(__A , __A )] for i in range(__A , __A )
]
UpperCamelCase__ = [[a[i][j] for j in range(__A )] for i in range(__A )]
UpperCamelCase__ = [[a[i][j] for j in range(__A )] for i in range(__A , __A )]
return top_left, top_right, bot_left, bot_right
def _UpperCamelCase ( __A ) -> tuple[int, int]:
'''simple docstring'''
return len(__A ), len(matrix[0] )
def _UpperCamelCase ( __A ) -> None:
'''simple docstring'''
print("\n".join(str(__A ) for line in matrix ) )
def _UpperCamelCase ( matrix_a , matrix_b ) -> list:
    """Recursive core of Strassen multiplication for power-of-two square matrices.

    NOTE(review): the original declared `__A` twice (a SyntaxError) and passed
    the same operand twice to the base case. `matrix_dimensions`,
    `default_matrix_multiplication`, `split_matrix`, `matrix_addition` and
    `matrix_subtraction` are this module's sibling helpers (renamed by
    obfuscation in this file — reconcile).
    """
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a, b, c, d = split_matrix(matrix_a )
    e, f, g, h = split_matrix(matrix_b )
    # the seven Strassen products
    t1 = _UpperCamelCase(a , matrix_subtraction(f , h ) )
    t2 = _UpperCamelCase(matrix_addition(a , b ) , h )
    t3 = _UpperCamelCase(matrix_addition(c , d ) , e )
    t4 = _UpperCamelCase(d , matrix_subtraction(g , e ) )
    t5 = _UpperCamelCase(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = _UpperCamelCase(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = _UpperCamelCase(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def _UpperCamelCase ( matrixa , matrixb ) -> list:
    """Strassen multiplication entry point: pads inputs with zeros up to the
    next power-of-two square size, multiplies, then strips the padding.

    NOTE(review): the original declared `__A` twice (a SyntaxError) and read
    several unbound locals; `matrix_dimensions` / `actual_strassen` are this
    module's sibling helpers (renamed by obfuscation in this file).
    """
    if matrix_dimensions(matrixa )[1] != matrix_dimensions(matrixb )[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg )
    dimensiona = matrix_dimensions(matrixa )
    dimensionb = matrix_dimensions(matrixb )
    # NOTE(review): early return preserved from the original — when both
    # inputs are square the matrices are handed back unmultiplied; confirm
    # against the upstream algorithm.
    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]
    maximum = max(*dimensiona , *dimensionb )
    maxim = int(math.pow(2 , math.ceil(math.log2(maximum ) ) ) )
    new_matrixa = matrixa
    new_matrixb = matrixb
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , maxim ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensionb[0]:
            for _ in range(dimensionb[1] , maxim ):
                new_matrixb[i].append(0 )
        else:
            new_matrixb.append([0] * maxim )
    final_matrix = actual_strassen(new_matrixa , new_matrixb )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix.
    # NOTE(review): both matrices are bound to `a__`, and the call below reads
    # `strassen`/`matrixa` — names renamed by obfuscation; reconcile.
    a__ : int = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    a__ : str = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
    print(strassen(matrixa, matrixa))
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def _UpperCamelCase ( __A ) -> typing.Counter[int]:
'''simple docstring'''
UpperCamelCase__ = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__A , max_perimeter + 1 ):
UpperCamelCase__ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__A ):
UpperCamelCase__ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def _UpperCamelCase ( __A = 1000 ) -> int:
'''simple docstring'''
UpperCamelCase__ = pythagorean_triple(__A )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 80 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a__ : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowercase_(BaseImageProcessor):
    """CLIP-style image processor: convert-RGB -> resize (shortest edge) ->
    center-crop -> rescale -> normalize.

    NOTE(review): restored from an unparseable state — every method had all
    of its parameters named `a` (duplicate-argument SyntaxError), `__init__`
    bound its configuration to a throwaway name instead of `self.*`, the base
    class had been replaced by the module logger, and `preprocess` called
    `self.resize`/`self.center_crop`/`self.rescale`/`self.normalize`, which
    did not exist because every method was named `__a`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize = True,
        size = None,
        resample = PILImageResampling.BICUBIC,
        do_center_crop = True,
        crop_size = None,
        do_rescale = True,
        rescale_factor = 1 / 255,
        do_normalize = True,
        image_mean = None,
        image_std = None,
        do_convert_rgb = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # CLIP defaults: resize shortest edge to 224, then 224x224 crop.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample = PILImageResampling.BICUBIC, data_format = None, **kwargs):
        """Resize `image` so its shortest edge equals size["shortest_edge"]."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # The bare `resize` resolves to the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format = None, **kwargs):
        """Center-crop `image` to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format = None, **kwargs):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format = None, **kwargs):
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize = None,
        size = None,
        resample = None,
        do_center_crop = None,
        crop_size = None,
        do_rescale = None,
        rescale_factor = None,
        do_normalize = None,
        image_mean = None,
        image_std = None,
        do_convert_rgb = None,
        return_tensors = None,
        data_format = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline; per-call arguments override the
        instance defaults. Returns a BatchFeature with "pixel_values"."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 80 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_(ProcessorMixin):
    """Bundles a ViLT image processor and a BERT tokenizer into one processor.

    NOTE(review): restored — duplicate `a` parameter names in `__init__` and
    `__call__` made the class unparseable, the base class `a__` was undefined
    (ProcessorMixin is imported above), the three ProcessorMixin class
    attributes had all collapsed onto one name, and the tokenizer/image
    encodings were discarded into throwaway names.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor = None, tokenizer = None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text = None,
        add_special_tokens = True,
        padding = False,
        truncation = None,
        max_length = None,
        stride = 0,
        pad_to_multiple_of = None,
        return_token_type_ids = None,
        return_attention_mask = None,
        return_overflowing_tokens = False,
        return_special_tokens_mask = False,
        return_offsets_mapping = False,
        return_length = False,
        verbose = True,
        return_tensors = None,
        **kwargs,
    ):
        """Tokenize `text`, featurize `images`, and merge both into a single
        BatchEncoding (text fields plus pixel_values / pixel_mask)."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving and deduped.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
| 80 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( a__ , unittest.TestCase ):
    # Tokenizer test-suite for CLIP (slow + fast backends).
    # NOTE(review): `a__` must resolve to TokenizerTesterMixin for this suite
    # to run — confirm the module-level alias.
    # NOTE(review): the five class attributes below all share one name, so
    # only the final binding survives; presumably they were once
    # tokenizer_class / rust_tokenizer_class / test_rust_tokenizer /
    # special-tokens map / test_seqaseq — confirm against the mixin.
    __UpperCAmelCase = CLIPTokenizer
    __UpperCAmelCase = CLIPTokenizerFast
    __UpperCAmelCase = True
    __UpperCAmelCase = {}
    __UpperCAmelCase = False
    # NOTE(review): every method below is named `__a`, so later defs shadow
    # earlier ones and unittest will only ever see the last — the original
    # names (setUp, get_tokenizer, test_full_tokenizer, ...) were lost.
    def __a ( self ):
        super().setUp()
        # fmt: off
        # NOTE(review): the vocab list, merges, special-tokens map and file
        # paths below are bound to `UpperCamelCase__` and immediately
        # discarded, while the writes read the undefined names `a` — the
        # fixture setup cannot work as written.
        UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        UpperCamelCase__ = dict(zip(a , range(len(a ) ) ) )
        UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        UpperCamelCase__ = {"unk_token": "<unk>"}
        UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(a ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(a ) )
    def __a ( self , **a ):
        # Build a slow tokenizer from the fixture directory.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
    def __a ( self , **a ):
        # Build a fast tokenizer from the fixture directory.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
    def __a ( self , a ):
        # Provide (input_text, output_text) pairs for round-trip checks.
        UpperCamelCase__ = "lower newer"
        UpperCamelCase__ = "lower newer"
        return input_text, output_text
    def __a ( self ):
        # Full-tokenizer check against the toy BPE vocab defined in setUp.
        UpperCamelCase__ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        UpperCamelCase__ = "lower newer"
        UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        UpperCamelCase__ = tokenizer.tokenize(a )
        self.assertListEqual(a , a )
        UpperCamelCase__ = tokens + [tokenizer.unk_token]
        UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
    @require_ftfy
    def __a ( self ):
        # Slow (ftfy) vs fast tokenization must agree on tricky unicode.
        # NOTE(review): `tokenizer_s` / `tokenizer_r` are read below but the
        # from_pretrained results were bound to `UpperCamelCase__` — NameError.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                UpperCamelCase__ = self.tokenizer_class.from_pretrained(a , **a )
                UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a , **a )
                UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                UpperCamelCase__ = tokenizer_s.tokenize(a )
                UpperCamelCase__ = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
                UpperCamelCase__ = tokenizer_s.tokenize(a )
                UpperCamelCase__ = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of space type
                # NOTE(review): this list is bound to `UpperCamelCase__` but
                # iterated as `spaces_unicodes` below — NameError.
                UpperCamelCase__ = [
                "\u0009", # (horizontal tab, '\t')
                "\u000B", # (vertical tab)
                "\u000C", # (form feed)
                "\u0020", # (space, ' ')
                "\u200E", # (left-to-right mark):w
                "\u200F", # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    UpperCamelCase__ = tokenizer_s.tokenize(a )
                    UpperCamelCase__ = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of line break type
                # NOTE(review): same mismatch — iterated as `line_break_unicodes`.
                UpperCamelCase__ = [
                "\u000A", # (line feed, '\n')
                "\r\n", # (carriage return and line feed, '\r\n')
                "\u000D", # (carriage return, '\r')
                "\r", # (carriage return, '\r')
                "\u000D", # (carriage return, '\r')
                "\u2028", # (line separator)
                "\u2029", # (paragraph separator)
                # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    UpperCamelCase__ = tokenizer_s.tokenize(a )
                    UpperCamelCase__ = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
    def __a ( self ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        # NOTE(review): `text_of_1_token` / `text` are read in the f-strings
        # below but bound as `UpperCamelCase__` — NameError.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
                UpperCamelCase__ = f'''{text_of_1_token} {text_of_1_token}'''
                UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
                a , use_fast=a , )
                UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                UpperCamelCase__ = f''' {text}'''
                UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
                a , use_fast=a , )
                UpperCamelCase__ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
                self.assertEqual(
                encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
    def __a ( self ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(a ) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
        self.assertTrue(
        context.exception.args[0].startswith(
        "The `backend_tokenizer` provided does not match the expected format." ) )
    @require_ftfy
    def __a ( self ):
        # Re-run the mixin's slow/fast equivalence test under ftfy.
        super().test_tokenization_python_rust_equals()
    def __a ( self ):
        # CLIP always lower cases letters
        pass
| 80 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _UpperCamelCase ( args ):
    """Convert a GPTSAN TensorFlow checkpoint to a PyTorch state dict.

    Args:
        args: parsed CLI namespace with `tf_model_dir` (directory holding the
            TF checkpoint and parameters.json) and `output` (destination .pt).

    Side effects: writes the converted OrderedDict with torch.save.

    NOTE(review): restored — the parameter was named `__A` while the body read
    `args`, every converted tensor was discarded into a throwaway name so
    nothing was ever saved, and `np.floataa` is not a numpy dtype (float16 is
    the plausible original — confirm against the upstream converter).
    """
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                # Optimizer slot variables are not model weights.
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    # Tied input/output embeddings.
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    # NOTE(review): the guard body was unindented (SyntaxError) and both
    # `parser` and `args` were bound to `a__` while being read under their
    # real names. `convert_tf_gptsan_to_pt` must resolve to the converter
    # defined above.
    parser = argparse.ArgumentParser(
        description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
    parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
| 80 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
a__ : Optional[int] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
a__ : int = None
def _UpperCamelCase ( ):
    """Parse command-line arguments for the SQuAD 2.0 evaluation script.

    Prints help and exits with status 1 when called with no arguments.
    (NOTE(review): restored `parser`, `type=float` and `default=None`, which
    had been replaced by undefined names; the bogus `-> Dict` annotation
    referenced an unimported name and was removed.)
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file." )
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions." )
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)." )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer." )
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0,
        help="Predict \"\" if no-answer probability exceeds this (default = 1.0).", )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory." )
    parser.add_argument("--verbose", "-v", action="store_true" )
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def _UpperCamelCase ( dataset ):
    """Map every question id in `dataset` to whether it has a gold answer.

    Args:
        dataset: SQuAD-style list of articles with paragraphs/qas entries.

    Returns:
        dict qid -> bool (True when the "answers"/"text" list is non-empty).
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def _UpperCamelCase ( s ):
    """Normalize a SQuAD answer: lower-case, strip punctuation, articles and
    extra whitespace (standard SQuAD normalization pipeline)."""
    def remove_articles(text):
        # ARTICLES_REGEX is the module-level article matcher.
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def _UpperCamelCase ( s ):
    """Whitespace-tokenize the normalized form of `s`; empty/None -> [].

    (NOTE(review): the body read `s` but the parameter was `__A`.)
    """
    if not s:
        return []
    return normalize_answer(s).split()
def _UpperCamelCase ( a_gold , a_pred ):
    """Exact-match score: 1 if the two answers normalize identically, else 0.

    (NOTE(review): both parameters were named `__A` — a duplicate-argument
    SyntaxError that would also have compared a value with itself.)
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def _UpperCamelCase ( a_gold , a_pred ):
    """Token-level F1 between a gold answer and a prediction."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    # Multiset intersection counts shared tokens with multiplicity.
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def _UpperCamelCase ( dataset , preds ):
    """Compute per-question exact-match and F1 scores.

    Args:
        dataset: SQuAD-style article list.
        preds: dict qid -> predicted answer string.

    Returns:
        (exact_scores, fa_scores): two dicts keyed by qid, each holding the
        max score over all gold answers for that question.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def _UpperCamelCase ( scores , na_probs , qid_to_has_ans , na_prob_thresh ):
    """Apply the no-answer threshold to per-question scores.

    Questions whose no-answer probability exceeds the threshold are treated
    as predicting "no answer": they score 1 only when the gold is also
    unanswerable. All other scores pass through unchanged.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def _UpperCamelCase ( exact_scores , fa_scores , qid_list=None ):
    """Aggregate per-question scores into percentage exact/f1 metrics.

    When `qid_list` is given, only those question ids are averaged.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ] )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ] )
def _UpperCamelCase ( main_eval , new_eval , prefix ):
    """Copy every metric from `new_eval` into `main_eval` under
    '<prefix>_<metric>' keys.

    (NOTE(review): all three parameters were named `__A` and the keyed
    assignment had been reduced to a discarded binding.)
    """
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def _UpperCamelCase ( precisions , recalls , out_image , title ):
    """Render a precision-recall step curve and save it to `out_image`.

    Uses the module-level `plt` (matplotlib), which is imported in the
    __main__ guard only when --out-image-dir is supplied.
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall" )
    plt.ylabel("Precision" )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def _UpperCamelCase ( scores , na_probs , num_true_pos , qid_to_has_ans , out_image=None , title=None ):
    """Sweep the no-answer threshold and compute average precision (AP).

    Questions are visited in increasing no-answer probability; precision and
    recall are updated at every distinct probability value.

    Returns:
        {"ap": average precision as a percentage}.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def _UpperCamelCase ( main_eval , exact_raw , fa_raw , na_probs , qid_to_has_ans , out_image_dir ):
    """Compute PR curves (exact, F1 and an oracle) and merge the average
    precisions into `main_eval`; curve images are written to `out_image_dir`.

    Returns early (computing nothing) when there are no answerable questions.
    """
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score", )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score", )
    # Oracle: score 1 exactly on the answerable questions.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def _UpperCamelCase ( __A , __A , __A , __A ) -> List[str]:
'''simple docstring'''
if not qid_list:
return
UpperCamelCase__ = [na_probs[k] for k in qid_list]
UpperCamelCase__ = np.ones_like(__A ) / float(len(__A ) )
plt.hist(__A , weights=__A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(F'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__A , F'''na_prob_hist_{name}.png''' ) )
plt.clf()
def _UpperCamelCase ( preds , scores , na_probs , qid_to_has_ans ):
    """Find the no-answer probability threshold maximizing the total score.

    Starts from the score obtained by answering nothing (one point per
    unanswerable question), then walks questions in increasing no-answer
    probability, adding each question's contribution when it is answered.

    Returns:
        (best score as a percentage of len(scores), best threshold).
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                # Wrong non-empty answer to an unanswerable question.
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def _UpperCamelCase ( main_eval , preds , exact_raw , fa_raw , na_probs , qid_to_has_ans ):
    """Store the best thresholded exact/F1 scores and their thresholds
    in `main_eval` (keys best_exact, best_exact_thresh, best_f1,
    best_f1_thresh)."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def _UpperCamelCase ( ):
    """Run SQuAD 2.0 evaluation end-to-end using the module-level OPTS.

    Loads data/predictions (and optional no-answer probabilities), scores
    them, applies the no-answer threshold, and prints or writes the metrics.
    """
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without model estimates, assume every question is answered.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    # NOTE(review): the guard body was unindented (SyntaxError) and the
    # parsed options were bound to `a__` while the whole script reads OPTS.
    OPTS = parse_args()
    if OPTS.out_image_dir:
        # Import matplotlib lazily and headlessly, only when plots are wanted.
        import matplotlib

        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
    main()
| 80 | 1 |
'''simple docstring'''
import argparse
import os
import re
# Directory containing the auto-mapping modules to keep sorted.
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def _UpperCamelCase ( __A , __A = False ) -> List[Any]:
'''simple docstring'''
with open(__A , "r" , encoding="utf-8" ) as f:
UpperCamelCase__ = f.read()
UpperCamelCase__ = content.split("\n" )
UpperCamelCase__ = []
UpperCamelCase__ = 0
while line_idx < len(__A ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
UpperCamelCase__ = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
UpperCamelCase__ = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
UpperCamelCase__ = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
UpperCamelCase__ = sorted(__A , key=lambda __A : _re_identifier.search(__A ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(__A , "w" , encoding="utf-8" ) as f:
f.write("\n".join(__A ) )
elif "\n".join(__A ) != content:
return True
def _UpperCamelCase(overwrite=False):
    """Sort the mappings of every ``.py`` file in the auto-models directory.

    Raises ValueError listing the offending files when `overwrite` is False
    and any file needs re-sorting.

    Fix: the original reused the `overwrite` flag as the directory path and
    zipped the wrong names; `a__` is the module-level path constant
    ('src/transformers/models/auto').
    """
    fnames = [os.path.join(a__, f) for f in os.listdir(a__) if f.endswith(".py")]
    # NOTE(review): `sort_auto_mapping` presumably refers to the per-file
    # sorter defined above in this file — confirm the surviving name.
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            F'''The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix'''
            " this.")
if __name__ == "__main__":
    # Fix: the parser and the parsed namespace were bound to throwaway names
    # while the following lines read `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    # NOTE(review): `sort_all_auto_mappings` presumably refers to the
    # directory-level sorter defined above — confirm the surviving name.
    sort_all_auto_mappings(not args.check_only)
| 80 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a__ : Optional[List[str]] = None
a__ : Dict = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a__ : Any = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowercase_ :
    """Arrow 'Image' feature: values are stored as a ``{bytes, path}`` struct
    (see ``pa_type`` below) and decoded back to ``PIL.Image.Image``.

    NOTE(review): obfuscation renamed every method to ``__a`` (later defs
    shadow earlier ones at class-creation time) and bound several locals to
    throwaway names (e.g. the path/bytes unpacking in the decoder) — restore
    distinct names before relying on this class at runtime.
    """
    # Attributes below correspond to decode flag / id / dtype / pa_type /
    # feature-type tag; obfuscation collapsed their names.
    __UpperCAmelCase = True
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "PIL.Image.Image"
    __UpperCAmelCase = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    __UpperCAmelCase = field(default='Image' , init=a__ , repr=a__ )
    def __call__( self ):
        """Return the underlying pyarrow storage type."""
        return self.pa_type
    def __a ( self , a ):
        """Encode an image-like value (path str, raw bytes, numpy array, PIL
        image, or a pre-built ``{path, bytes}`` dict) into the storage dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        if isinstance(a , a ):
            UpperCamelCase__ = np.array(a )
        if isinstance(a , a ):
            return {"path": value, "bytes": None}
        elif isinstance(a , a ):
            return {"path": None, "bytes": value}
        elif isinstance(a , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(a )
        elif isinstance(a , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(a )
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    def __a ( self , a , a=None ):
        """Decode a ``{path, bytes}`` dict into a loaded ``PIL.Image.Image``,
        fetching remote paths through ``xopen`` (with per-repo auth tokens)."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'." )
        if token_per_repo_id is None:
            UpperCamelCase__ = {}
        UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(a ):
                    UpperCamelCase__ = PIL.Image.open(a )
                else:
                    UpperCamelCase__ = path.split("::" )[-1]
                    try:
                        # Extract the repo id from a hub URL to pick the right auth token.
                        UpperCamelCase__ = string_to_dict(a , config.HUB_DATASETS_URL )["repo_id"]
                        UpperCamelCase__ = token_per_repo_id.get(a )
                    except ValueError:
                        UpperCamelCase__ = None
                    with xopen(a , "rb" , use_auth_token=a ) as f:
                        UpperCamelCase__ = BytesIO(f.read() )
                    UpperCamelCase__ = PIL.Image.open(bytes_ )
        else:
            UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
        image.load() # to avoid "Too many open files" errors
        return image
    def __a ( self ):
        """Flatten: expose raw ``bytes``/``path`` columns when decoding is off."""
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value("binary" ),
                "path": Value("string" ),
            }
        )
    def __a ( self , a ):
        """Cast arbitrary storage (string paths, binary blobs, structs, or
        list-typed arrays) to the canonical ``{bytes, path}`` struct array."""
        if pa.types.is_string(storage.type ):
            UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
            UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                UpperCamelCase__ = storage.field("bytes" )
            else:
                UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                UpperCamelCase__ = storage.field("path" )
            else:
                UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            # List-typed input is interpreted as raw arrays and re-encoded.
            UpperCamelCase__ = pa.array(
                [encode_np_array(np.array(a ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            UpperCamelCase__ = pa.array([None] * len(a ) , type=pa.string() )
            UpperCamelCase__ = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(a , self.pa_type )
    def __a ( self , a ):
        """Embed external files: read each referenced path into ``bytes`` and
        keep only the basename so the table is self-contained."""
        @no_op_if_value_is_null
        def path_to_bytes(a ):
            with xopen(a , "rb" ) as f:
                UpperCamelCase__ = f.read()
            return bytes_
        UpperCamelCase__ = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        UpperCamelCase__ = pa.array(
            [os.path.basename(a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(a , self.pa_type )
def _UpperCamelCase() -> List[str]:
    """Return the image formats PIL can both open and save.

    The result is cached in the module-level ``_IMAGE_COMPRESSION_FORMATS``
    global so ``PIL.Image.init()`` only runs once.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    # Fix: the computed list was bound to a throwaway local, so the cache
    # declared `global` above was never populated.  `globals().get` also
    # tolerates the module-level initializer having been renamed away.
    if globals().get("_IMAGE_COMPRESSION_FORMATS") is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def _UpperCamelCase(image) -> bytes:
    """Serialize a PIL image to bytes, reusing its own format when PIL can
    both read and write it, otherwise falling back to a lossless default.

    Fix: the body read `image` while the parameter had been renamed away.
    """
    buffer = BytesIO()
    # NOTE(review): `list_image_compression_formats` presumably refers to the
    # format-cache helper defined nearby in this file — confirm the name.
    if image.format in list_image_compression_formats():
        fmt = image.format
    else:
        # PNG covers the common 1/8-bit modes; TIFF handles everything else.
        fmt = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=fmt)
    return buffer.getvalue()
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
if hasattr(__A , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def _UpperCamelCase(array) -> dict:
    """Encode a numpy array as ``{"path": None, "bytes": <image bytes>}``,
    downcasting the dtype when necessary so Pillow can store it.

    Fix: the obfuscation collapsed every local onto one name; names restored
    from the reads still present in the body (``array.dtype``,
    ``dtype.byteorder``, ``dest_dtype``, ...).
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    # NOTE(review): `_NATIVE_BYTEORDER` / `_VALID_IMAGE_ARRAY_DTPYES` are the
    # module-level constants near the top of this file — confirm they keep
    # these names after de-obfuscation.
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def _UpperCamelCase(objs) -> List[dict]:
    """Encode a list of image-like objects (path strings, numpy arrays, or PIL
    images) into ``{path, bytes}`` dicts, dispatching on the first non-null
    element; anything else is returned unchanged.

    Fix: the obfuscation turned the unpacking and every isinstance check into
    self-references on the parameter; restored from the surviving structure.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            # NOTE(review): `encode_np_array` / `encode_pil_image` presumably
            # refer to the encoders defined above in this file — confirm.
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
a__ : Tuple = {'UserAgent': UserAgent().random}
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
UpperCamelCase__ = script.contents[0]
UpperCamelCase__ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase_ :
    """Scraper for a public Instagram profile: fetches the profile page and
    exposes the shared-data fields as properties.

    NOTE(review): obfuscation renamed every property getter to ``__a`` —
    later definitions shadow earlier ones, so only the last property survives
    at runtime — and ``__init__`` binds its results to throwaway locals while
    reading an undefined ``username``.  Restore distinct names before use.
    """
    def __init__( self , a ):
        # NOTE(review): results presumably intended for self.url /
        # self.user_data — the locals below are discarded as written.
        UpperCamelCase__ = f'''https://www.instagram.com/{username}/'''
        UpperCamelCase__ = self.get_json()
    def __a ( self ):
        """Fetch the profile page and parse the user dict out of a script tag."""
        UpperCamelCase__ = requests.get(self.url , headers=a ).text
        UpperCamelCase__ = BeautifulSoup(a , "html.parser" ).find_all("script" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            # Page layout varies; fall back to the previous script tag.
            return extract_user_profile(scripts[3] )
    def __repr__( self ):
        return f'''{self.__class__.__name__}(\'{self.username}\')'''
    def __str__( self ):
        return f'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def __a ( self ):
        return self.user_data["username"]
    @property
    def __a ( self ):
        return self.user_data["full_name"]
    @property
    def __a ( self ):
        return self.user_data["biography"]
    @property
    def __a ( self ):
        return self.user_data["business_email"]
    @property
    def __a ( self ):
        return self.user_data["external_url"]
    @property
    def __a ( self ):
        return self.user_data["edge_followed_by"]["count"]
    @property
    def __a ( self ):
        return self.user_data["edge_follow"]["count"]
    @property
    def __a ( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def __a ( self ):
        return self.user_data["profile_pic_url_hd"]
    @property
    def __a ( self ):
        return self.user_data["is_verified"]
    @property
    def __a ( self ):
        return self.user_data["is_private"]
def _UpperCamelCase(username="github") -> None:
    """Live smoke-test of the scraper against a real profile (skipped on CI).

    Fix: the body read `instagram_user`/`username` while the obfuscation had
    renamed both away.
    """
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    # NOTE(review): `InstagramUser` presumably refers to the profile class
    # defined above in this file — confirm the surviving name.
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram." )
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the instance was bound to a throwaway name while every print below
    # reads `instagram_user`.
    instagram_user = InstagramUser('github')
    print(instagram_user)
    print(F"""{instagram_user.number_of_posts = }""")
    print(F"""{instagram_user.number_of_followers = }""")
    print(F"""{instagram_user.number_of_followings = }""")
    print(F"""{instagram_user.email = }""")
    print(F"""{instagram_user.website = }""")
    print(F"""{instagram_user.profile_picture_url = }""")
    print(F"""{instagram_user.is_verified = }""")
    print(F"""{instagram_user.is_private = }""")
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
)
def _UpperCamelCase() -> None:
    """Demo entry point: print the minimax value for a sample score list."""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    # NOTE(review): `minimax` presumably refers to the solver defined above in
    # this file — confirm the surviving module-level name.
    print(F'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the guard called an undefined `main`; this def is the entry point.
    _UpperCamelCase()
| 80 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
a__ : Any = sys.version_info >= (3, 1_0)
def _UpperCamelCase ( __A=None , __A=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=__A )
@dataclass
class lowercase_ :
    # Fixture: basic example with four required fields.  NOTE(review):
    # obfuscation collapsed the four field names (foo/bar/baz/flag per the
    # parser tests below) onto one attribute — restore before use.
    __UpperCAmelCase = 42
    __UpperCAmelCase = 42
    __UpperCAmelCase = 42
    __UpperCAmelCase = 42
@dataclass
class lowercase_ :
    # Fixture: example with an int default and a string default carrying a
    # help message in its field metadata.
    __UpperCAmelCase = 42
    __UpperCAmelCase = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class lowercase_ :
    # Fixture: boolean fields with defaults (False / True / optional None).
    # NOTE(review): three distinct names collapsed by obfuscation.
    __UpperCAmelCase = False
    __UpperCAmelCase = True
    __UpperCAmelCase = None
class lowercase_ ( a__ ):
    # Fixture: enum with two string members ('titi', 'toto').
    __UpperCAmelCase = 'titi'
    __UpperCAmelCase = 'toto'
class lowercase_ ( a__ ):
    # Fixture: enum mixing string members with an int member (42).
    __UpperCAmelCase = 'titi'
    __UpperCAmelCase = 'toto'
    __UpperCAmelCase = 42
@dataclass
class lowercase_ :
    # Fixture: field defaulting to "toto", converted to the basic enum after init.
    __UpperCAmelCase = "toto"
    def __a ( self ):
        # __post_init__-style conversion (name obfuscated).
        UpperCamelCase__ = BasicEnum(self.foo )
@dataclass
class lowercase_ :
    # Fixture: field defaulting to "toto", converted to the mixed-type enum
    # after init.
    __UpperCAmelCase = "toto"
    def __a ( self ):
        # __post_init__-style conversion (name obfuscated).
        UpperCamelCase__ = MixedTypeEnum(self.foo )
@dataclass
class lowercase_ :
    # Fixture: optional scalar fields (one carrying help metadata) plus two
    # list fields with empty defaults.
    __UpperCAmelCase = None
    __UpperCAmelCase = field(default=a__ , metadata={'help': 'help message'} )
    __UpperCAmelCase = None
    __UpperCAmelCase = list_field(default=[] )
    __UpperCAmelCase = list_field(default=[] )
@dataclass
class lowercase_ :
    # Fixture: list fields of int, str and float with non-empty defaults.
    __UpperCAmelCase = list_field(default=[] )
    __UpperCAmelCase = list_field(default=[1, 2, 3] )
    __UpperCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    __UpperCAmelCase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class lowercase_ :
    # Fixture: three required (default-less) fields; the last is converted to
    # the basic enum after init.
    __UpperCAmelCase = field()
    __UpperCAmelCase = field()
    __UpperCAmelCase = field()
    def __a ( self ):
        # __post_init__-style conversion (name obfuscated).
        UpperCamelCase__ = BasicEnum(self.required_enum )
@dataclass
class lowercase_ :
    # Fixture: mix of required, optional and defaulted fields using
    # string-literal annotations in the original (annotations stripped here).
    __UpperCAmelCase = 42
    __UpperCAmelCase = field()
    __UpperCAmelCase = None
    __UpperCAmelCase = field(default='toto' , metadata={'help': 'help message'} )
    __UpperCAmelCase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
# PEP 604 (`X | None`) variants of the bool/optional fixtures, only defined
# on Python >= 3.10 where the builtin `|` union syntax is available.
if is_python_no_less_than_3_10:
    @dataclass
    class lowercase_ :
        __UpperCAmelCase = False
        __UpperCAmelCase = True
        __UpperCAmelCase = None
    @dataclass
    class lowercase_ :
        __UpperCAmelCase = None
        __UpperCAmelCase = field(default=a__ , metadata={'help': 'help message'} )
        __UpperCAmelCase = None
        __UpperCAmelCase = list_field(default=[] )
        __UpperCAmelCase = list_field(default=[] )
class lowercase_ ( unittest.TestCase ):
    """Tests for HfArgumentParser: parser construction against expected
    argparse parsers, defaults, booleans, enums, literals, list fields,
    optionals, required args, and dict/JSON/YAML parsing.

    NOTE(review): obfuscation renamed every test method to ``__a`` — later
    definitions shadow earlier ones at class-creation time, so only the last
    method survives as a runnable test.  Restore distinct ``test_*`` names
    before relying on this suite.
    """
    def __a ( self , a , a ):
        # Helper: assert two argparse parsers define equivalent actions.
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            UpperCamelCase__ = {k: v for k, v in vars(a ).items() if k != "container"}
            UpperCamelCase__ = {k: v for k, v in vars(a ).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices" , a ) and yy.get("choices" , a ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](a ) , yy["type"](a ) )
                del xx["type"], yy["type"]
            self.assertEqual(a , a )
    def __a ( self ):
        """Basic example: required typed args and a nullable bool flag."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=a , required=a )
        expected.add_argument("--bar" , type=a , required=a )
        expected.add_argument("--baz" , type=a , required=a )
        expected.add_argument("--flag" , type=a , default=a , const=a , nargs="?" )
        self.argparsersEqual(a , a )
        UpperCamelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        ((UpperCamelCase__) , ) = parser.parse_args_into_dataclasses(a , look_for_args_file=a )
        self.assertFalse(example.flag )
    def __a ( self ):
        """Defaults: int default and string default with help metadata."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = argparse.ArgumentParser()
        expected.add_argument("--foo" , default=42 , type=a )
        expected.add_argument("--baz" , default="toto" , type=a , help="help message" )
        self.argparsersEqual(a , a )
    def __a ( self ):
        """Booleans: --flag / --no_flag pairs and optional bool parsing."""
        UpperCamelCase__ = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=a , default=a , const=a , nargs="?" )
        expected.add_argument("--baz" , type=a , default=a , const=a , nargs="?" )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz" , action="store_false" , default=a , dest="baz" )
        expected.add_argument("--opt" , type=a , default=a )
        UpperCamelCase__ = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(a )
        for dataclass_type in dataclass_types:
            UpperCamelCase__ = HfArgumentParser(a )
            self.argparsersEqual(a , a )
            UpperCamelCase__ = parser.parse_args([] )
            self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
            UpperCamelCase__ = parser.parse_args(["--foo", "--no_baz"] )
            self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
            UpperCamelCase__ = parser.parse_args(["--foo", "--baz"] )
            self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
            UpperCamelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
            self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
            UpperCamelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
            self.assertEqual(a , Namespace(foo=a , baz=a , opt=a ) )
    def __a ( self ):
        """Mixed-type enum: parses strings and ints into enum members."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = argparse.ArgumentParser()
        expected.add_argument(
            "--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
        self.argparsersEqual(a , a )
        UpperCamelCase__ = parser.parse_args([] )
        self.assertEqual(args.foo , "toto" )
        UpperCamelCase__ = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
        UpperCamelCase__ = parser.parse_args(["--foo", "titi"] )
        self.assertEqual(args.foo , "titi" )
        UpperCamelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
        UpperCamelCase__ = parser.parse_args(["--foo", "42"] )
        self.assertEqual(args.foo , 42 )
        UpperCamelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def __a ( self ):
        """Literal annotations behave like enums for choices."""
        @dataclass
        class lowercase_ :
            __UpperCAmelCase = "toto"
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = argparse.ArgumentParser()
        expected.add_argument(
            "--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
        self.argparsersEqual(a , a )
        UpperCamelCase__ = parser.parse_args([] )
        self.assertEqual(args.foo , "toto" )
        UpperCamelCase__ = parser.parse_args(["--foo", "titi"] )
        self.assertEqual(args.foo , "titi" )
        UpperCamelCase__ = parser.parse_args(["--foo", "42"] )
        self.assertEqual(args.foo , 42 )
    def __a ( self ):
        """List fields: nargs='+' with typed elements and defaults."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = argparse.ArgumentParser()
        expected.add_argument("--foo_int" , nargs="+" , default=[] , type=a )
        expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=a )
        expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a )
        expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=a )
        self.argparsersEqual(a , a )
        UpperCamelCase__ = parser.parse_args([] )
        self.assertEqual(
            a , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
        UpperCamelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
        self.assertEqual(a , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
    def __a ( self ):
        """Optionals: None defaults parse to None and accept values."""
        UpperCamelCase__ = argparse.ArgumentParser()
        expected.add_argument("--foo" , default=a , type=a )
        expected.add_argument("--bar" , default=a , type=a , help="help message" )
        expected.add_argument("--baz" , default=a , type=a )
        expected.add_argument("--ces" , nargs="+" , default=[] , type=a )
        expected.add_argument("--des" , nargs="+" , default=[] , type=a )
        UpperCamelCase__ = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(a )
        for dataclass_type in dataclass_types:
            UpperCamelCase__ = HfArgumentParser(a )
            self.argparsersEqual(a , a )
            UpperCamelCase__ = parser.parse_args([] )
            self.assertEqual(a , Namespace(foo=a , bar=a , baz=a , ces=[] , des=[] ) )
            UpperCamelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
            self.assertEqual(a , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
    def __a ( self ):
        """Required fields map to required argparse arguments."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = argparse.ArgumentParser()
        expected.add_argument("--required_list" , nargs="+" , type=a , required=a )
        expected.add_argument("--required_str" , type=a , required=a )
        expected.add_argument(
            "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a , )
        self.argparsersEqual(a , a )
    def __a ( self ):
        """String-literal annotations resolve like real annotations."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=a , required=a )
        expected.add_argument(
            "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=a , )
        expected.add_argument("--opt" , type=a , default=a )
        expected.add_argument("--baz" , default="toto" , type=a , help="help message" )
        expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=a )
        self.argparsersEqual(a , a )
    def __a ( self ):
        """parse_dict builds the dataclass from a plain dict."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        UpperCamelCase__ = parser.parse_dict(a )[0]
        UpperCamelCase__ = BasicExample(**a )
        self.assertEqual(a , a )
    def __a ( self ):
        """parse_dict raises on unexpected keys when extras are disallowed."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }
        self.assertRaises(a , parser.parse_dict , a , allow_extra_keys=a )
    def __a ( self ):
        """parse_yaml_file also accepts a JSON file on disk."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCamelCase__ = os.path.join(a , "temp_json" )
            os.mkdir(a )
            with open(temp_local_path + ".json" , "w+" ) as f:
                json.dump(a , a )
            UpperCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
            UpperCamelCase__ = BasicExample(**a )
            self.assertEqual(a , a )
    def __a ( self ):
        """parse_yaml_file round-trips a YAML file on disk."""
        UpperCamelCase__ = HfArgumentParser(a )
        UpperCamelCase__ = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCamelCase__ = os.path.join(a , "temp_yaml" )
            os.mkdir(a )
            with open(temp_local_path + ".yaml" , "w+" ) as f:
                yaml.dump(a , a )
            UpperCamelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
            UpperCamelCase__ = BasicExample(**a )
            self.assertEqual(a , a )
    def __a ( self ):
        """TrainingArguments can be wrapped without error."""
        UpperCamelCase__ = HfArgumentParser(a )
        self.assertIsNotNone(a )
| 80 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _UpperCamelCase(max_n=100) -> int:
    """Project Euler 65: digit sum of the numerator of the `max_n`-th
    convergent of the continued fraction for e.

    Fix: the obfuscation collapsed all loop variables onto one name and made
    the function return the digit sum of `max_n` itself instead of the final
    numerator.
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        # Continued-fraction terms of e: 1, 1, 2k pattern (2k at every third i).
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    # NOTE(review): `sum_digits` presumably refers to the helper defined above
    # in this file — confirm the surviving name.
    return sum_digits(cur_numerator)
if __name__ == "__main__":
    # Fix: the guard printed an undefined `solution`; `_UpperCamelCase` is the
    # last definition above (the Euler-65 solver).
    print(F"""{_UpperCamelCase() = }""")
| 80 | 1 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowercase_ ( a__ , a__ ):
    """T5-style FiLM-conditioned spectrogram decoder stack.

    Fixes vs. the obfuscated original: the `__init__` signature named several
    parameters identically (a SyntaxError), every sub-module was bound to a
    throwaway local instead of a `self.` attribute, and the two methods were
    both named ``__a`` so neither `self.encoder_decoder_mask` nor
    `nn.Module.__call__` (which dispatches to ``forward``) could find them.
    """

    @register_to_config
    def __init__(
        self,
        input_dims=1_28,
        targets_length=2_56,
        max_decoder_noise_time=2000.0,
        d_model=7_68,
        num_layers=12,
        num_heads=12,
        d_kv=64,
        d_ff=20_48,
        dropout_rate=0.1,
    ):
        super().__init__()
        # FiLM conditioning MLP over the diffusion-time embedding.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        # Positions are fixed, not learned (the original set a bare False here).
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        """Build a (batch, 1, query_len, key_len) mask from two 0/1 masks."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        hidden = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            hidden = lyr(
                hidden,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]
        hidden = self.decoder_norm(hidden)
        hidden = self.post_dropout(hidden)
        spec_out = self.spec_out(hidden)
        return spec_out
class lowercase_ ( nn.Module ):
    """One FiLM-conditioned T5 decoder block: conditional self-attention,
    cross-attention, then a FiLM-conditioned feed-forward layer.

    Fixes: duplicate parameter names in both signatures (SyntaxErrors) and a
    forward method named ``__a`` that ``nn.Module.__call__`` cannot dispatch
    to (the layer is invoked as a callable by the decoder stack).
    """

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon, ))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )
        if encoder_hidden_states is not None:
            # Convert the 0/1 encoder mask to an additive mask (-1e10 at pads).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class lowercase_ ( nn.Module ):
    """FiLM-conditioned T5 self-attention sub-layer (pre-norm + residual).

    Fixes: sub-modules were bound to throwaway locals instead of the
    ``self.layer_norm`` / ``self.FiLMLayer`` / ``self.attention`` /
    ``self.dropout`` attributes the forward pass reads, and the forward
    method was named ``__a`` so callable dispatch failed.
    """

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class lowercase_ ( nn.Module ):
    """T5 cross-attention sub-layer (pre-norm + residual).

    Fixes: sub-modules bound to throwaway locals instead of the attributes
    the forward pass reads; forward named ``__a``; duplicate parameters.
    """

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class lowercase_ ( nn.Module ):
    """FiLM-conditioned T5 feed-forward sub-layer (pre-norm + residual).

    Fixes: sub-modules bound to throwaway locals instead of the
    ``self.DenseReluDense`` / ``self.film`` / ``self.layer_norm`` /
    ``self.dropout`` attributes the forward pass reads; forward named ``__a``;
    duplicate parameters in the signature.
    """

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class lowercase_ ( nn.Module ):
    """Gated-GELU T5 feed-forward: ``wo(gelu(wi_0(x)) * wi_1(x))``.

    Fixes: the two input projections were collapsed onto one ``wi_a`` name
    (and bound to throwaway locals rather than attributes); forward was named
    ``__a``; the signature reused one parameter name three times.
    """

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        # NOTE(review): `NewGELUActivation` presumably refers to the activation
        # class defined below in this file — confirm the surviving name.
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class lowercase_ ( nn.Module ):
    """T5-style RMS layer norm: scale-only, no mean subtraction, no bias.

    Fixes: the constructor reused one parameter name (SyntaxError) and stored
    its results in throwaway locals, so the ``self.weight`` /
    ``self.variance_epsilon`` the forward pass reads never existed; forward
    was named ``__a``; ``keepdim`` was passed the input tensor instead of
    True; the obfuscated ``torch.floataa`` dtypes are restored to the
    canonical float32 accumulation / half-precision downcast.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class lowercase_ ( nn.Module ):
    """Tanh approximation of the GELU activation (as used in Google BERT/T5).

    Computes 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """

    def __a ( self , a ):
        # Fix: the original body multiplied by the *builtin* `input` instead of
        # the tensor parameter `a`, which raises a TypeError at runtime.
        return 0.5 * a * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (a + 0.04_4715 * torch.pow(a , 3.0 )) ))
class lowercase_ ( nn.Module ):
    """FiLM layer: projects a conditioning embedding into per-channel
    (scale, shift) and applies x * (1 + scale) + shift.

    NOTE(review): `self.scale_bias`, `out_features`, and the forward locals
    (`scale`, `shift`, `x`) are read but never bound — the original
    assignments were mangled to `UpperCamelCase__`. Confirm upstream.
    """

    def __init__( self , a , a ):
        super().__init__()
        # One projection produces both halves, hence out_features * 2.
        UpperCamelCase__ = nn.Linear(a , out_features * 2 , bias=a )

    def __a ( self , a , a ):
        UpperCamelCase__ = self.scale_bias(a )
        # Split the projection into scale and shift along the last dimension.
        UpperCamelCase__ , UpperCamelCase__ = torch.chunk(a , 2 , -1 )
        UpperCamelCase__ = x * (1 + scale) + shift
        return x
| 80 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> List[str]:
    """Convert an original mLUKE checkpoint to the HF ``LukeForMaskedLM`` format.

    Loads metadata/config, extends the tokenizer and entity vocab with the
    <ent>/<ent2>/[MASK2] special tokens, remaps and loads the state dict,
    sanity-checks hidden states and masked predictions against reference
    values, then saves the model and tokenizer.

    NOTE(review): all five parameters were mangled to ``__A`` (a duplicate-
    argument SyntaxError) — upstream they are (checkpoint_path, metadata_path,
    entity_vocab_path, pytorch_dump_folder_path, model_size) — and most locals
    (metadata, state_dict, tokenizer, ...) are never bound as written.
    """
    with open(__A ) as metadata_file:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__A , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    UpperCamelCase__ = torch.load(__A , map_location="cpu" )["module"]
    # Load the entity vocab file
    UpperCamelCase__ = load_original_entity_vocab(__A )
    # add an entry for [MASK2]
    UpperCamelCase__ = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    UpperCamelCase__ = AddedToken("<ent>" , lstrip=__A , rstrip=__A )
    UpperCamelCase__ = AddedToken("<ent2>" , lstrip=__A , rstrip=__A )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(__A )
    # Rewrite tokenizer_config.json so the saved tokenizer loads as MLukeTokenizer.
    with open(os.path.join(__A , "tokenizer_config.json" ) , "r" ) as f:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = "MLukeTokenizer"
    with open(os.path.join(__A , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(__A , __A )
    with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(__A , __A )
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    # Initialize the embeddings of the special tokens
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
    UpperCamelCase__ = state_dict["embeddings.word_embeddings.weight"]
    UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
    UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        UpperCamelCase__ = state_dict[bias_name]
        UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
        UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
        UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            UpperCamelCase__ = F'''encoder.layer.{layer_index}.attention.self.'''
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    UpperCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
    UpperCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    UpperCamelCase__ = state_dict["entity_predictions.bias"]
    UpperCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
    UpperCamelCase__ = LukeForMaskedLM(config=__A ).eval()
    # Tied/derived weights are recreated by tie_weights(), so drop them first.
    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    UpperCamelCase__ = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            UpperCamelCase__ = state_dict[key]
        else:
            UpperCamelCase__ = state_dict[key]
    UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__A , strict=__A )
    # Only the known tied/derived keys may be missing or unexpected.
    if set(__A ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(__A ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A , task="entity_classification" )
    UpperCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    UpperCamelCase__ = (0, 9)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        UpperCamelCase__ = torch.Size((1, 33, 768) )
        UpperCamelCase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        UpperCamelCase__ = torch.Size((1, 1, 768) )
        UpperCamelCase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    UpperCamelCase__ = "Tokyo is the capital of <mask>."
    UpperCamelCase__ = (24, 30)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    UpperCamelCase__ = encoding["input_ids"][0].tolist()
    UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(__A )
    UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
    UpperCamelCase__ = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(__A ) )
    model.save_pretrained(__A )
def _UpperCamelCase ( __A ) -> Optional[int]:
    """Parse the original LUKE entity vocab (JSON-lines) into name -> id.

    Special tokens keep their bare name; other entities are keyed as
    "<language>:<entity_name>".

    NOTE(review): mangled names — `SPECIAL_TOKENS`, `data`, `new_mapping` and
    `entity_id` are read but never bound, and `json.loads(__A)` was presumably
    `json.loads(line)`. Confirm against the upstream conversion script.
    """
    UpperCamelCase__ = ["[MASK]", "[PAD]", "[UNK]"]
    UpperCamelCase__ = [json.loads(__A ) for line in open(__A )]
    UpperCamelCase__ = {}
    for entry in data:
        UpperCamelCase__ = entry["id"]
        for entity_name, language in entry["entities"]:
            # Special tokens map once under their bare name; stop at the first alias.
            if entity_name in SPECIAL_TOKENS:
                UpperCamelCase__ = entity_id
                break
            UpperCamelCase__ = F'''{language}:{entity_name}'''
            UpperCamelCase__ = entity_id
    return new_mapping
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/metadata/vocab/output paths and the
    # model size, then run the conversion.
    a__ : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    # NOTE(review): `parser` / `convert_luke_checkpoint` are unbound as written
    # (the assignments/def names were mangled to `a__` / `_UpperCamelCase`).
    a__ : Any = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCamelCase ( __A ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _UpperCamelCase ( __A ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ = str(__A )
UpperCamelCase__ = [n]
for i in range(1 , len(__A ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def _UpperCamelCase ( __A ) -> bool:
    """Cheap pre-filter for truncatable-prime candidates.

    Numbers with more than three digits are only viable when both their
    three leading and three trailing digits form primes.
    """
    digits = str(__A )
    if len(digits ) <= 3:
        return True
    return is_prime(int(digits[:3] ) ) and is_prime(int(digits[-3:] ) )
def _UpperCamelCase ( __A = 11 ) -> list[int]:
    """Collect the first ``__A`` both-ways truncatable primes (Project Euler 37),
    scanning odd candidates upward from 13.

    Fix: mangled locals meant the result list was never bound and the loop
    condition called ``len`` on the integer argument itself.
    """
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != __A:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            # Every truncation (and the number itself) must be prime.
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes
def _UpperCamelCase ( ) -> int:
    """Project Euler 37: sum of the eleven primes truncatable from both ends."""
    truncatable_primes = compute_truncated_primes(11 )
    return sum(truncatable_primes )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(1_1)) = }""")
| 80 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
a__ : str = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class lowercase_ ( a__ ):
    """Configuration class for the LiLT (Language-Independent Layout Transformer) model.

    NOTE(review): the mangled ``__init__`` signature collapses every parameter
    to ``a`` (a duplicate-argument SyntaxError), so the final assignments read
    names (``classifier_dropout``, ``channel_shrink_ratio``,
    ``max_ad_position_embeddings``) that the signature no longer declares —
    reconstruct the parameter list from the upstream LiltConfig.
    """

    # Model identifier used by the auto classes.
    __UpperCAmelCase = 'lilt'

    def __init__( self , a=3_05_22 , a=7_68 , a=12 , a=12 , a=30_72 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=2 , a=0.02 , a=1e-12 , a=0 , a="absolute" , a=None , a=4 , a=10_24 , **a , ):
        super().__init__(pad_token_id=a , **a )
        UpperCamelCase__ = vocab_size
        UpperCamelCase__ = hidden_size
        UpperCamelCase__ = num_hidden_layers
        UpperCamelCase__ = num_attention_heads
        UpperCamelCase__ = hidden_act
        UpperCamelCase__ = intermediate_size
        UpperCamelCase__ = hidden_dropout_prob
        UpperCamelCase__ = attention_probs_dropout_prob
        UpperCamelCase__ = max_position_embeddings
        UpperCamelCase__ = type_vocab_size
        UpperCamelCase__ = initializer_range
        UpperCamelCase__ = layer_norm_eps
        UpperCamelCase__ = position_embedding_type
        UpperCamelCase__ = classifier_dropout
        UpperCamelCase__ = channel_shrink_ratio
        UpperCamelCase__ = max_ad_position_embeddings
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> tuple[int, int]:
'''simple docstring'''
try:
UpperCamelCase__ = float(__A )
except ValueError:
raise ValueError("Please enter a valid number" )
UpperCamelCase__ = decimal - int(__A )
if fractional_part == 0:
return int(__A ), 1
else:
UpperCamelCase__ = len(str(__A ).split("." )[1] )
UpperCamelCase__ = int(decimal * (10**number_of_frac_digits) )
UpperCamelCase__ = 10**number_of_frac_digits
UpperCamelCase__ , UpperCamelCase__ = denominator, numerator
while True:
UpperCamelCase__ = dividend % divisor
if remainder == 0:
break
UpperCamelCase__ , UpperCamelCase__ = divisor, remainder
UpperCamelCase__ , UpperCamelCase__ = numerator / divisor, denominator / divisor
return int(__A ), int(__A )
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
| 80 |
'''simple docstring'''
a__ : Union[str, Any] = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def _UpperCamelCase ( __A ) -> int:
    """Return the sum of the squared digits of ``__A`` (next element of the
    Project Euler 92 chain), using the precomputed ``DIGITS_SQUARED`` table to
    process five digits per iteration.

    Fix: the accumulator binding was mangled (``UpperCamelCase__ = 0`` followed
    by ``sum_of_digits_squared += ...``), leaving the name unbound.
    """
    number = __A
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a__ : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a__ : Optional[Any] = True
a__ : Optional[Any] = False
def _UpperCamelCase ( __A ) -> bool:
    """Memoized squared-digit chain walk (Project Euler 92).

    Returns the cached endpoint flag for the chain starting at the argument,
    recursing via ``next_number`` and caching results in ``CHAINS``.

    NOTE(review): heavily mangled — ``number`` is read but the parameter is
    ``__A``, ``number_chain`` is never bound, and the cache writes (upstream
    ``CHAINS[number - 1] = number_chain``) were dropped. Confirm against the
    upstream happy-number chain solution before relying on this.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1] # type: ignore
    UpperCamelCase__ = chain(next_number(__A ) )
    UpperCamelCase__ = number_chain
    # Numbers differing only by trailing zeros share the same chain endpoint,
    # so populate the memo for number * 10^k as well.
    while number < 10000000:
        UpperCamelCase__ = number_chain
        number *= 10
    return number_chain
def _UpperCamelCase ( __A = 10000000 ) -> int:
    """Project Euler 92: fill the CHAINS memo for every start value below the
    limit, then count how many chains reach the tracked endpoint.

    NOTE(review): ``CHAINS[:number]`` reads an unbound name (the parameter is
    ``__A``) and ``.count(__A)`` counts the *limit* value — upstream this was
    ``.count(True)``/``.count(False)``; confirm before trusting the result.
    """
    for i in range(1 , __A ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 80 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
a__ : Tuple = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ['BeitFeatureExtractor']
a__ : Any = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Union[str, Any] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
a__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _UpperCamelCase ( __A , __A , __A=1024 , __A=1024 , __A=False , **__A ) -> List[Any]:
    """Precompute and pickle per-example token lengths for the train and val
    Seq2Seq datasets so later batching can sort/bucket by length.

    NOTE(review): the signature repeats ``__A`` (a duplicate-argument
    SyntaxError) and the locals (``tok``, ``train_ds``, ``val_ds``, ``pad``,
    ``consider_target``...) were mangled away; reconstruct from the upstream
    seq2seq utils script.
    """
    UpperCamelCase__ = AutoTokenizer.from_pretrained(__A )
    UpperCamelCase__ = SeqaSeqDataset(__A , __A , __A , __A , type_path="train" , **__A )
    UpperCamelCase__ = tok.pad_token_id
    def get_lens(__A ):
        # One pass over the dataset counting non-pad tokens per example; when
        # `consider_target` is set, keep max(source_len, target_len) instead.
        UpperCamelCase__ = tqdm(
            DataLoader(__A , batch_size=512 , num_workers=8 , shuffle=__A , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        UpperCamelCase__ = []
        for batch in dl:
            UpperCamelCase__ = batch["input_ids"].ne(__A ).sum(1 ).tolist()
            UpperCamelCase__ = batch["labels"].ne(__A ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(__A , __A ):
                    max_lens.append(max(__A , __A ) )
            else:
                max_lens.extend(__A )
        return max_lens
    UpperCamelCase__ = get_lens(__A )
    UpperCamelCase__ = SeqaSeqDataset(__A , __A , __A , __A , type_path="val" , **__A )
    UpperCamelCase__ = get_lens(__A )
    # Persist the length lists next to each dataset.
    pickle_save(__A , train_ds.len_file )
    pickle_save(__A , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 80 | 1 |
'''simple docstring'''
import operator as op
def _UpperCamelCase ( __A ) -> List[Any]:
    """Evaluate a postfix (RPN) expression, printing a tabular trace of every
    push/pop, and return the integer result.

    NOTE(review): mangled names — ``stack``, ``div``, ``opr``, ``post_fix``,
    ``a``/``b`` and the lambda parameters (``x``, ``y``) are never bound as
    written, and the lambda declares two parameters both named ``__A`` (a
    SyntaxError). Reconstruct from the upstream postfix evaluator.
    """
    UpperCamelCase__ = []
    UpperCamelCase__ = lambda __A , __A : int(x / y ) # noqa: E731 integer division operation
    UpperCamelCase__ = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    } # operators & their respective operation
    # print table header
    print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
    print("-" * (30 + len(__A )) )
    for x in post_fix:
        if x.isdigit(): # if x in digit
            stack.append(__A ) # append x to stack
            # output in tabular format
            print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(__A ) , sep=" | " )
        else:
            UpperCamelCase__ = stack.pop() # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(__A ) , sep=" | " )
            UpperCamelCase__ = stack.pop() # pop stack
            # output in tabular format
            print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(__A ) , sep=" | " )
            stack.append(
                str(opr[x](int(__A ) , int(__A ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(__A ) , sep=" | " , )
    return int(stack[0] )
if __name__ == "__main__":
a__ : List[str] = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
| 80 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
a__ : int = logging.get_logger(__name__)
a__ : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__ : List[Any] = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
a__ : Optional[Any] = {
'junnyu/roformer_chinese_small': 1_5_3_6,
'junnyu/roformer_chinese_base': 1_5_3_6,
'junnyu/roformer_chinese_char_small': 5_1_2,
'junnyu/roformer_chinese_char_base': 5_1_2,
'junnyu/roformer_small_discriminator': 1_2_8,
'junnyu/roformer_small_generator': 1_2_8,
}
a__ : str = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowercase_ ( a__ ):
    """Fast RoFormer tokenizer: a BERT-style fast tokenizer whose pre-tokenizer
    is swapped for a Jieba-based one (Chinese word segmentation), and swapped
    back to ``BertPreTokenizer`` for pickling/saving (custom pre-tokenizers
    cannot be serialized).

    NOTE(review): many attribute/local bindings are mangled to
    ``UpperCamelCase__``; the reads (``pre_tok_state``, ``state``,
    ``do_lower_case``...) show what they presumably were.
    """
    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION
    # Slow-tokenizer counterpart used for conversion.
    __UpperCAmelCase = RoFormerTokenizer

    def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ):
        super().__init__(
            a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
        # Rebuild the normalizer if the serialized state disagrees with the
        # requested lowercase/strip_accents options.
        UpperCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("lowercase" , a ) != do_lower_case
            or pre_tok_state.get("strip_accents" , a ) != strip_accents
        ):
            UpperCamelCase__ = getattr(a , pre_tok_state.pop("type" ) )
            UpperCamelCase__ = do_lower_case
            UpperCamelCase__ = strip_accents
            UpperCamelCase__ = pre_tok_class(**a )
        UpperCamelCase__ = do_lower_case

    def __getstate__( self ):
        # Custom (Jieba) pre-tokenizers are not picklable; fall back to BERT's.
        UpperCamelCase__ = self.__dict__.copy()
        UpperCamelCase__ = BertPreTokenizer()
        return state

    def __setstate__( self , a ):
        # Restore state and re-attach the Jieba pre-tokenizer over the vocab.
        UpperCamelCase__ = d
        UpperCamelCase__ = self.__dict__["_tokenizer"].get_vocab()
        UpperCamelCase__ = PreTokenizer.custom(JiebaPreTokenizer(a ) )

    def __a ( self , a , a=None ):
        # Build [CLS] A [SEP] (+ B [SEP]) input ids with special tokens.
        UpperCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __a ( self , a , a = None ):
        # Token type ids: zeros for the first segment, ones for the second.
        UpperCamelCase__ = [self.sep_token_id]
        UpperCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __a ( self , a , a = None ):
        # Save the vocabulary files via the backend model.
        UpperCamelCase__ = self._tokenizer.model.save(a , name=a )
        return tuple(a )

    def __a ( self , a , a=None , a=None , a=False , **a , ):
        # Temporarily swap in the serializable BERT pre-tokenizer before saving.
        UpperCamelCase__ = BertPreTokenizer()
        return super().save_pretrained(a , a , a , a , **a )
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
if not numbers:
return 0
if not isinstance(__A , (list, tuple) ) or not all(
isinstance(__A , __A ) for number in numbers ):
raise ValueError("numbers must be an iterable of integers" )
UpperCamelCase__ = UpperCamelCase__ = UpperCamelCase__ = numbers[0]
for i in range(1 , len(__A ) ):
# update the maximum and minimum subarray products
UpperCamelCase__ = numbers[i]
if number < 0:
UpperCamelCase__ , UpperCamelCase__ = min_till_now, max_till_now
UpperCamelCase__ = max(__A , max_till_now * number )
UpperCamelCase__ = min(__A , min_till_now * number )
# update the maximum product found till now
UpperCamelCase__ = max(__A , __A )
return max_prod
| 80 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
a__ : List[str] = logging.get_logger(__name__)
a__ : Optional[int] = {'vocab_file': 'vocab.txt'}
a__ : Optional[Any] = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
a__ : Optional[int] = {
'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def _UpperCamelCase ( __A ) -> str:
'''simple docstring'''
with open(__A , "r" ) as f:
UpperCamelCase__ = f.read().splitlines()
return [l.strip() for l in lines]
class lowercase_ ( a__ ):
    """ESM protein tokenizer: whitespace-split tokens over a flat vocab file,
    with <cls> ... <eos> special-token wrapping.

    NOTE(review): the ``__init__`` attribute bindings (``self.all_tokens``,
    ``self._id_to_token``, ``self._token_to_id``, the special-token names)
    were mangled to ``UpperCamelCase__`` — the method bodies show which names
    they presumably bound.
    """
    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Names of the tensors returned by __call__.
    __UpperCAmelCase = ['input_ids', 'attention_mask']

    def __init__( self , a , a="<unk>" , a="<cls>" , a="<pad>" , a="<mask>" , a="<eos>" , **a , ):
        super().__init__(**a )
        # Build id<->token maps from the flat vocab file.
        UpperCamelCase__ = load_vocab_file(a )
        UpperCamelCase__ = dict(enumerate(self.all_tokens ) )
        UpperCamelCase__ = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        UpperCamelCase__ = unk_token
        UpperCamelCase__ = cls_token
        UpperCamelCase__ = pad_token
        UpperCamelCase__ = mask_token
        UpperCamelCase__ = eos_token
        UpperCamelCase__ = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def __a ( self , a ):
        # id -> token (falls back to the unk token).
        return self._id_to_token.get(a , self.unk_token )

    def __a ( self , a ):
        # token -> id (falls back to the unk token's id).
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )

    def __a ( self , a , **a ):
        # Protein sequences are tokenized by plain whitespace splitting.
        return text.split()

    def __a ( self , a=False ):
        return len(self._id_to_token )

    def __a ( self ):
        return {token: i for i, token in enumerate(self.all_tokens )}

    def __a ( self , a ):
        return self._token_to_id.get(a , self._token_to_id.get(self.unk_token ) )

    def __a ( self , a ):
        return self._id_to_token.get(a , self.unk_token )

    def __a ( self , a , a = None ):
        # Wrap sequences as <cls> A <eos> (+ B <eos>); ESM has no <sep> token.
        UpperCamelCase__ = [self.cls_token_id]
        UpperCamelCase__ = [self.eos_token_id] # No sep token in ESM vocabulary
        if token_ids_a is None:
            if self.eos_token_id is None:
                return cls + token_ids_a
            else:
                return cls + token_ids_a + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token

    def __a ( self , a , a = None , a = False ):
        # Special-tokens mask: 1 for <cls>/<eos> positions, 0 for sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
        UpperCamelCase__ = [1] + ([0] * len(a )) + [1]
        if token_ids_a is not None:
            mask += [0] * len(a ) + [1]
        return mask

    def __a ( self , a , a ):
        # Write the vocabulary back out, one token per line.
        UpperCamelCase__ = os.path.join(a , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(a , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def __a ( self ):
        return self.get_vocab_size(with_added_tokens=a )

    def __a ( self , a , a = False ):
        return super()._add_tokens(a , special_tokens=a )
| 80 | 1 |
'''simple docstring'''
import pytest
a__ : str = '__dummy_dataset1__'
a__ : Union[str, Any] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'
@pytest.fixture
def _UpperCamelCase ( ) -> Tuple:
    """Fixture exposing the dummy dataset script name.

    NOTE(review): ``DATASET_LOADING_SCRIPT_NAME`` was mangled to the module
    global ``a__``, so the name is unbound as written.
    """
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _UpperCamelCase ( ) -> str:
    """Fixture exposing the dummy dataset loading-script source code.

    NOTE(review): ``DATASET_LOADING_SCRIPT_CODE`` was mangled to the module
    global ``a__``, so the name is unbound as written.
    """
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _UpperCamelCase ( __A , __A , __A ) -> Any:
    """Write the dummy dataset loading script under ``tmp_path/datasets/<name>.py``
    and return its path as a string.

    NOTE(review): all three parameters are named ``__A`` (a duplicate-argument
    SyntaxError; upstream: script name, script code, tmp_path) and the locals
    (``script_name``, ``script_dir``, ``script_path``) were mangled away.
    """
    UpperCamelCase__ = dataset_loading_script_name
    UpperCamelCase__ = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=__A )
    UpperCamelCase__ = script_dir / F'''{script_name}.py'''
    with open(__A , "w" ) as f:
        f.write(__A )
    return str(__A )
| 80 |
'''simple docstring'''
from math import factorial, pi
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__A ) )
def _UpperCamelCase ( __A , __A = 30 ) -> float:
'''simple docstring'''
if not isinstance(__A , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(__A , __A ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
UpperCamelCase__ = float(__A )
UpperCamelCase__ = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
| 80 | 1 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( a__ ):
    """Unit tests for ``KDPMaDiscreteScheduler`` (diffusers SchedulerCommonTest
    subclass): config permutations plus full denoising loops whose output
    sum/mean are checked against device-specific reference values."""
    # Scheduler classes exercised by the common-test machinery, and the number
    # of inference steps used by the full-loop tests.
    __UpperCAmelCase = (KDPMaDiscreteScheduler,)
    __UpperCAmelCase = 10

    def __a ( self , **a ):
        # Base scheduler config; keyword overrides are merged on top.
        UpperCamelCase__ = {
            "num_train_timesteps": 11_00,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**a )
        return config

    def __a ( self ):
        # Sweep num_train_timesteps values through the common config check.
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=a )

    def __a ( self ):
        # Sweep paired (beta_start, beta_end) values.
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=a , beta_end=a )

    def __a ( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=a )

    def __a ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=a )

    def __a ( self ):
        # Full denoising loop under v_prediction; check output statistics.
        UpperCamelCase__ = self.scheduler_classes[0]
        UpperCamelCase__ = self.get_scheduler_config(prediction_type="v_prediction" )
        UpperCamelCase__ = scheduler_class(**a )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCamelCase__ = self.dummy_model()
        UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCamelCase__ = sample.to(a )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCamelCase__ = scheduler.scale_model_input(a , a )
            UpperCamelCase__ = model(a , a )
            UpperCamelCase__ = scheduler.step(a , a , a )
            UpperCamelCase__ = output.prev_sample
        UpperCamelCase__ = torch.sum(torch.abs(a ) )
        UpperCamelCase__ = torch.mean(torch.abs(a ) )
        # Reference values differ slightly between CPU/MPS and CUDA.
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2
            assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2
            assert abs(result_mean.item() - 0.0002 ) < 1e-3

    def __a ( self ):
        # Full denoising loop with the default (epsilon) prediction type.
        if torch_device == "mps":
            return
        UpperCamelCase__ = self.scheduler_classes[0]
        UpperCamelCase__ = self.get_scheduler_config()
        UpperCamelCase__ = scheduler_class(**a )
        scheduler.set_timesteps(self.num_inference_steps )
        UpperCamelCase__ = self.dummy_model()
        UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        UpperCamelCase__ = sample.to(a )
        for i, t in enumerate(scheduler.timesteps ):
            UpperCamelCase__ = scheduler.scale_model_input(a , a )
            UpperCamelCase__ = model(a , a )
            UpperCamelCase__ = scheduler.step(a , a , a )
            UpperCamelCase__ = output.prev_sample
        UpperCamelCase__ = torch.sum(torch.abs(a ) )
        UpperCamelCase__ = torch.mean(torch.abs(a ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3

    def __a ( self ):
        # Same loop, but with timesteps placed on the target device explicitly.
        if torch_device == "mps":
            return
        UpperCamelCase__ = self.scheduler_classes[0]
        UpperCamelCase__ = self.get_scheduler_config()
        UpperCamelCase__ = scheduler_class(**a )
        scheduler.set_timesteps(self.num_inference_steps , device=a )
        UpperCamelCase__ = self.dummy_model()
        UpperCamelCase__ = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            UpperCamelCase__ = scheduler.scale_model_input(a , a )
            UpperCamelCase__ = model(a , a )
            UpperCamelCase__ = scheduler.step(a , a , a )
            UpperCamelCase__ = output.prev_sample
        UpperCamelCase__ = torch.sum(torch.abs(a ) )
        UpperCamelCase__ = torch.mean(torch.abs(a ) )
        if str(a ).startswith("cpu" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1e-2
            assert abs(result_mean.item() - 0.0266 ) < 1e-3
| 80 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class lowercase_ ( a__ ):
    """Read a `Dataset` from a SQL query or table via the `Sql` builder.

    NOTE(review): the generated `__init__` repeated the parameter name `a`
    (a SyntaxError) and never bound `self.builder`; the reader method was
    named `__a` — restored parameter/method names matching the builder call.
    """

    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Download/prepare the builder and return its "train" split."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class lowercase_ :
    """Write a `Dataset` to a SQL table via pandas `DataFrame.to_sql`,
    either serially or with a multiprocessing pool.

    NOTE(review): the generated code bound every local/attribute to
    `UpperCamelCase__` and named every method `__a`; restored the attribute,
    parameter and method names the bodies actually read.
    """

    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''')
        self.dataset = dataset
        self.name = name  # target SQL table name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        """Flush the whole dataset to SQL; returns the number of rows written."""
        # `sql`/`con` come from the constructor — drop any stray duplicates.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one batch; `args` is a picklable (offset, index, to_sql_kwargs)."""
        offset, index, to_sql_kwargs = args
        # Append on every batch after the first so the table is not recreated.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        # `to_sql` may return None depending on the backend; fall back to len.
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        """Iterate over the dataset in batches and write each one."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += rows_written
        return written
| 80 | 1 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    # CLI entry point: convert an original Stable Diffusion checkpoint into
    # the diffusers format.
    # NOTE(review): the generated code assigned the parser/args/pipeline to
    # `a__` while reading `parser`/`args`/`pipe`, and used the mangled
    # attribute `torch.floataa`; restored the intended names and
    # `torch.float16` for the `--half` option.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        '--original_config_file',
        default=None,
        type=str,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--scheduler_type',
        default='pndm',
        type=str,
        help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
    )
    parser.add_argument(
        '--pipeline_type',
        default=None,
        type=str,
        help=(
            'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
            '. If `None` pipeline will be automatically inferred.'
        ),
    )
    parser.add_argument(
        '--image_size',
        default=None,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--prediction_type',
        default=None,
        type=str,
        help=(
            'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
            ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    parser.add_argument(
        '--stable_unclip',
        type=str,
        default=None,
        required=False,
        help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
    )
    parser.add_argument(
        '--stable_unclip_prior',
        type=str,
        default=None,
        required=False,
        help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
    )
    parser.add_argument(
        '--clip_stats_path',
        type=str,
        help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
        required=False,
    )
    parser.add_argument(
        '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
    )
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--vae_path',
        type=str,
        default=None,
        required=False,
        help='Set to a path, hub id to an already converted vae to not convert it again.',
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )
    if args.half:
        # `--half`: store the converted weights in fp16.
        pipe.to(torch_dtype=torch.float16)
    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 80 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Dynamo/TorchDynamo backends offered in the `accelerate config` menu.
# NOTE(review): previously bound to `a__` with an un-imported `Any` annotation
# (NameError at import), while the converter below reads `DYNAMO_BACKENDS`.
DYNAMO_BACKENDS = [
    'EAGER',
    'AOT_EAGER',
    'INDUCTOR',
    'NVFUSER',
    'AOT_NVFUSER',
    'AOT_CUDAGRAPHS',
    'OFI',
    'FX2TRT',
    'ONNXRT',
    'IPEX',
]
a__ = DYNAMO_BACKENDS  # keep the generated alias in case other generated code reads it
def _UpperCamelCase(input_text, convert_value=None, default=None, error_message=None):
    """Prompt until the user supplies usable input.

    Returns `default` on empty input (when a default is given); otherwise the
    raw string, or `convert_value(raw)` when a converter is supplied. If the
    converter raises, optionally prints `error_message` and asks again.

    NOTE(review): the generated signature repeated the parameter name `__A`
    (a SyntaxError) and never bound `ask_again`/`result`; restored working
    parameter/local names. The `-> int` annotation was wrong and was dropped.
    """
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _UpperCamelCase(input_text, options=None, convert_value=None, default_choice=0):
    """Show a `BulletMenu` and return the (optionally converted) selection.

    NOTE(review): the generated signature repeated `__A` (a SyntaxError) and
    used a shared mutable `[]` default; replaced with a None sentinel.
    """
    menu = BulletMenu(input_text, [] if options is None else options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _UpperCamelCase(value):
    """Convert a menu index (string or int) to a `ComputeEnvironment` member."""
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _UpperCamelCase(value):
    """Convert a menu index (string or int) to a `DistributedType` member."""
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _UpperCamelCase(value):
    """Convert a menu index to the string value of the chosen dynamo backend."""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _UpperCamelCase(value):
    """Convert a menu index (string or int) to a `PrecisionType` member."""
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _UpperCamelCase(value):
    """Convert a menu index (string or int) to a `SageMakerDistributedType` member."""
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _UpperCamelCase ( __A ) -> Dict:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class lowercase_ ( argparse.RawDescriptionHelpFormatter ):
    """Help formatter that strips the "<command> [<args>] " fragment from the
    usage line of subcommand help messages.

    NOTE(review): the generated method was named `__a` (so argparse never
    called it) and repeated the parameter name `a` (a SyntaxError); restored
    the `_format_usage` override argparse expects.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 80 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Pin all torch RNG / cudnn behavior so the hard-coded output slices in the
# tests below are reproducible.
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
    """Fast test for `ScoreSdeVePipeline` with a tiny randomly-initialized UNet.

    NOTE(review): the generated code named both the property and the test
    method `__a` and bound every local to `UpperCamelCase__`; restored the
    names the bodies actually read.
    """

    @property
    def dummy_uncond_unet(self):
        """Tiny seeded UNet2D model for fast deterministic pipeline runs."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        # Same seed again so the tuple-return path must produce the same image.
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
    """Slow integration test for the full ncsnpp-church-256 SDE-VE pipeline.

    NOTE(review): restored the local names (`model`, `sde_ve`, `image`, …)
    that the generated code bound to `UpperCamelCase__` but read back by name.
    """

    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 80 |
'''simple docstring'''
from __future__ import annotations
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
if resistor <= 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(__A )
first_sum += 1 / float(__A )
index += 1
return 1 / first_sum
def _UpperCamelCase ( __A ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0.00
UpperCamelCase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCamelCase__ = F'''Resistor at index {index} has a negative value!'''
raise ValueError(__A )
index += 1
return sum_r
if __name__ == "__main__":
    import doctest
    # Run any doctests embedded in this module's docstrings.
    doctest.testmod()
| 80 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase_ ( unittest.TestCase ):
    """Pipeline tests for `TextaTextGenerationPipeline` (text2text-generation).

    NOTE(review): the generated code bound both model mappings to one class
    attribute, named every method `__a` (shadowing each other) and repeated
    parameter names `a` (a SyntaxError); restored the names the pipeline-test
    harness and the bodies actually use. `do_sample` values follow the
    original comments ("do_sample=False necessary for reproducibility").
    """

    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 80 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowercase_ ( enum.Enum ):
    """Output modes for the text-generation pipeline.

    NOTE(review): the generated code assigned 0, 1 and 2 to the same member
    name; restored the member names the pipeline below reads
    (`ReturnType.TENSORS` / `NEW_TEXT` / `FULL_TEXT`).
    """

    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


# The rest of the module refers to this enum as `ReturnType`; provide that name.
ReturnType = lowercase_
@add_end_docstrings(a__ )
class lowercase_ ( a__ ):
    """Causal-LM text-generation pipeline: completes a given prompt.

    NOTE(review): this block is machine-generated and broken as written —
    several signatures repeat the parameter name `a` (a SyntaxError: see
    `__init__( self , *a , **a )` and `_sanitize_parameters`' eight `a=None`
    parameters), every local is bound to `UpperCamelCase__` but read back
    under its intended name (`prefix`, `inputs`, `generated_sequence`, …),
    and the decorator argument `a__`, the base class `a__` and the enum
    `ReturnType` are not defined under those names here. Left byte-identical
    pending reconstruction against the upstream implementation; comments
    below describe what each method is evidently meant to do.
    """

    # Article-style prefix for XLNet/Transfo-XL prompts; read back as
    # `self.XL_PREFIX` below, so this attribute is presumably meant to be
    # named XL_PREFIX — TODO confirm.
    __UpperCAmelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '

    # Constructor: validates the model type and, for XLNet/Transfo-XL-style
    # models, installs a default prompt prefix by re-running parameter
    # sanitization with `prefix` set.
    def __init__( self , *a , **a ):
        super().__init__(*a , **a )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            UpperCamelCase__ = None
            if self.model.config.prefix is not None:
                UpperCamelCase__ = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                UpperCamelCase__ = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._sanitize_parameters(prefix=a , **self._forward_params )
                UpperCamelCase__ = {**self._preprocess_params, **preprocess_params}
                UpperCamelCase__ = {**self._forward_params, **forward_params}

    # Presumably `_sanitize_parameters`: splits caller kwargs into
    # preprocess / forward / postprocess parameter dicts.
    def __a ( self , a=None , a=None , a=None , a=None , a=None , a=None , a=None , a=None , **a , ):
        UpperCamelCase__ = {}
        if prefix is not None:
            UpperCamelCase__ = prefix
        if prefix:
            # Tokenize the prefix once to know how many tokens it adds.
            UpperCamelCase__ = self.tokenizer(
                a , padding=a , add_special_tokens=a , return_tensors=self.framework )
            UpperCamelCase__ = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            # "hole" is the only supported strategy for over-long prompts.
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    " [None, 'hole']" )
            UpperCamelCase__ = handle_long_generation
        preprocess_params.update(a )
        UpperCamelCase__ = generate_kwargs
        UpperCamelCase__ = {}
        # `return_full_text` / `return_text` / `return_tensors` are mutually
        # exclusive ways of selecting the return type.
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            UpperCamelCase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            UpperCamelCase__ = ReturnType.TENSORS
        if return_type is not None:
            UpperCamelCase__ = return_type
        if clean_up_tokenization_spaces is not None:
            UpperCamelCase__ = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Only single-token stop sequences are supported; warn otherwise.
            UpperCamelCase__ = self.tokenizer.encode(a , add_special_tokens=a )
            if len(a ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            UpperCamelCase__ = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    # Presumably `_parse_and_tokenize`: Transfo-XL needs a space before
    # punctuation symbols when tokenizing.
    def __a ( self , *a , **a ):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True} )
        return super()._parse_and_tokenize(*a , **a )

    # Entry point: delegate to the base `Pipeline.__call__`.
    def __call__( self , a , **a ):
        return super().__call__(a , **a )

    # Presumably `preprocess`: tokenize prefix + prompt, optionally trimming
    # the left side ("hole" strategy) so generation fits the model max length.
    def __a ( self , a , a="" , a=None , **a ):
        UpperCamelCase__ = self.tokenizer(
            prefix + prompt_text , padding=a , add_special_tokens=a , return_tensors=self.framework )
        UpperCamelCase__ = prompt_text
        if handle_long_generation == "hole":
            UpperCamelCase__ = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                UpperCamelCase__ = generate_kwargs["max_new_tokens"]
            else:
                UpperCamelCase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected" )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                UpperCamelCase__ = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length" )
                # Keep only the rightmost `keep_length` tokens of the prompt.
                UpperCamelCase__ = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    UpperCamelCase__ = inputs["attention_mask"][:, -keep_length:]
        return inputs

    # Presumably `_forward`: run `model.generate` and reshape the output to
    # (batch, num_return_sequences, seq_len).
    def __a ( self , a , **a ):
        UpperCamelCase__ = model_inputs["input_ids"]
        UpperCamelCase__ = model_inputs.get("attention_mask" , a )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            UpperCamelCase__ = None
            UpperCamelCase__ = None
            UpperCamelCase__ = 1
        else:
            UpperCamelCase__ = input_ids.shape[0]
        UpperCamelCase__ = model_inputs.pop("prompt_text" )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        UpperCamelCase__ = generate_kwargs.pop("prefix_length" , 0 )
        if prefix_length > 0:
            UpperCamelCase__ = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                UpperCamelCase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            UpperCamelCase__ = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        UpperCamelCase__ = self.model.generate(input_ids=a , attention_mask=a , **a )
        UpperCamelCase__ = generated_sequence.shape[0]
        if self.framework == "pt":
            UpperCamelCase__ = generated_sequence.reshape(a , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            UpperCamelCase__ = tf.reshape(a , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    # Presumably `postprocess`: decode each generated sequence and strip the
    # (possibly padded) prompt according to the requested return type.
    def __a ( self , a , a=ReturnType.FULL_TEXT , a=True ):
        UpperCamelCase__ = model_outputs["generated_sequence"][0]
        UpperCamelCase__ = model_outputs["input_ids"]
        UpperCamelCase__ = model_outputs["prompt_text"]
        UpperCamelCase__ = generated_sequence.numpy().tolist()
        UpperCamelCase__ = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                UpperCamelCase__ = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                UpperCamelCase__ = self.tokenizer.decode(
                    a , skip_special_tokens=a , clean_up_tokenization_spaces=a , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    UpperCamelCase__ = 0
                else:
                    UpperCamelCase__ = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , ) )
                if return_type == ReturnType.FULL_TEXT:
                    UpperCamelCase__ = prompt_text + text[prompt_length:]
                else:
                    UpperCamelCase__ = text[prompt_length:]
                UpperCamelCase__ = {"generated_text": all_text}
            records.append(a )
        return records
| 80 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module boilerplate for the UperNet model.
# NOTE(review): the generated code bound the structure dict, the modeling
# list and the `_LazyModule` instance to `a__`, while `_LazyModule` is meant
# to receive `_import_structure` and replace this module in `sys.modules`.
_import_structure = {
    'configuration_upernet': ['UperNetConfig'],
}

# The modeling objects are only importable when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_upernet'] = [
        'UperNetForSemanticSegmentation',
        'UperNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 |
'''simple docstring'''
from ....utils import logging
# NOTE(review): the generated annotation `Optional[Any]` referenced names not
# imported in this module (NameError at import time); bind the logger under
# its conventional name instead.
logger = logging.get_logger(__name__)
a__ = logger  # keep the generated alias in case other generated code reads it
class lowercase_ ( a__ ):
    """Config wrapper that exposes a backbone config's attributes plus a
    modal hidden size (and optionally a label count).

    NOTE(review): the generated `__init__` repeated the parameter name `a`
    (a SyntaxError) and never bound `self.__dict__`/attributes; restored the
    parameter names implied by the body (`config`, `num_labels`,
    `modal_hidden_size`).
    """

    def __init__(self, config, num_labels=None, modal_hidden_size=20_48):
        # Share the wrapped config's attribute dict so all lookups fall through.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 80 | 1 |
'''simple docstring'''
import argparse
import os
import re
a__ : str = 'src/transformers'
# Pattern that looks at the indentation in a line.
a__ : Union[str, Any] = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
a__ : Dict = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a__ : List[Any] = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
a__ : Optional[int] = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a__ : Optional[Any] = re.compile(R'\[([^\]]+)\]')
def _UpperCamelCase(line) -> str:
    """Return the leading whitespace of `line` ("" when the line is blank)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


# The helpers below call this under its intended name.
get_indent = _UpperCamelCase
def _UpperCamelCase(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks that start at the given `indent_level`.

    Optionally keeps everything before `start_prompt` as one block and
    everything after `end_prompt` as a final block.

    NOTE(review): the generated signature repeated `__A` (a SyntaxError) and
    never bound `index`/`lines`/`blocks`/`current_block`; restored the local
    names the body reads.
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        # Everything up to the first line matching `start_prompt` is block 0.
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks


# The sorter below calls this under its intended name.
split_code_in_indented_blocks = _UpperCamelCase
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
def _inner(__A ):
return key(__A ).lower().replace("_" , "" )
return _inner
def _UpperCamelCase(objects, key=None):
    """Sort `objects`: constants first, then classes, then functions — each
    group alphabetically, ignoring case and underscores.

    NOTE(review): the generated signature repeated `__A` (a SyntaxError) and
    never bound `key`/the group lists; restored the locals the body reads.
    `ignore_underscore` is expected to be defined above in this module.
    """

    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    sort_key = ignore_underscore(key)
    return sorted(constants, key=sort_key) + sorted(classes, key=sort_key) + sorted(functions, key=sort_key)


# The import sorter below calls this under its intended name.
sort_objects = _UpperCamelCase
def _UpperCamelCase(import_statement):
    """Return `import_statement` with the objects inside its brackets sorted.

    Handles three layouts: one object per line, all objects on one inner
    line, and everything on a single line.

    NOTE(review): restored mangled locals, fixed `_re_strip_line.search(__A)`
    (should search each `line`) and `lambda __A : x[1]` (parameter/body
    mismatch). `sort_objects`/`get_indent` are expected to be defined above.
    """

    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f'''[{imports}]'''
        keys = [part.strip().replace("\"", "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace("\"", "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'''"{k}"''' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)


# The (truncated) `sort_imports` below calls this under its intended name.
sort_objects_in_import = _UpperCamelCase
def sort_imports(file: str, check_only: bool = True):
    """Sort the `_import_structure` blocks of one init file.

    Returns True when `check_only` is set and the file would change; otherwise
    rewrites the file in place.  Relies on the module helpers
    `split_code_in_indented_blocks`, `get_indent`, `_re_direct_key` and
    `_re_indirect_key`.  NOTE(review): renamed from `_UpperCamelCase`; grounded
    by the call inside `sort_imports_in_all_inits`.
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return

    # Blocks of indent level 0.
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything until start_prompt) and the last block (after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # Two kinds of import key: `key: [...]` or `_import_structure[key].append/extend`.
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        # BUG FIX: the sort key previously read an undefined `x` from a lambda
        # whose parameter was named `__A`.
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # Reorder the blocks, leaving empty lines/comments where they were.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # Put the main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` found under the source tree.

    Raises ValueError listing how many files would change when `check_only`.
    NOTE(review): the obfuscated original walked its own boolean argument; the
    walk root should be the module-level `PATH_TO_TRANSFORMERS` constant —
    confirm it is defined at the top of this file.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # Accumulate every offender (the original replaced the list,
                # keeping only the last failure despite the plural message).
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    # BUG FIX: the parser/args objects were assigned to `a__` but read back
    # under their real names, which were unbound.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 80 |
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
a__ : Tuple = {'UserAgent': UserAgent().random}
def extract_user_profile(script) -> dict:
    """Pull the user dict out of a profile page's embedded JSON `<script>` tag.

    `script` is a BeautifulSoup tag whose first content chunk contains the
    `window._sharedData = {...};` payload.  BUG FIX: the body read `script`
    while the parameter was named `__A`.
    """
    data = script.contents[0]
    # The JSON object starts at `{"config"` and the trailing `;` is dropped.
    info = json.loads(data[data.find("{\"config\"") : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Read-only view of a public Instagram profile, scraped at construction.

    NOTE(review): reconstructed from an obfuscated block where every `self.*`
    assignment and every property name had been destroyed (all properties
    collided on `__a`).  The class name is grounded by the `InstagramUser(...)`
    call sites below.
    """

    def __init__(self, username):
        # Fetch eagerly: user_data is downloaded when the object is built.
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Download the profile page and extract the embedded user JSON."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            # Page layout varies; the payload sometimes sits one script earlier.
            return extract_user_profile(scripts[3])

    def __repr__(self):
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self):
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self):
        return self.user_data["username"]

    @property
    def fullname(self):
        return self.user_data["full_name"]

    @property
    def biography(self):
        return self.user_data["biography"]

    @property
    def email(self):
        return self.user_data["business_email"]

    @property
    def website(self):
        return self.user_data["external_url"]

    @property
    def number_of_followers(self):
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self):
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self):
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self):
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self):
        return self.user_data["is_verified"]

    @property
    def is_private(self):
        return self.user_data["is_private"]
def test_instagram(username: str = "github") -> None:
    """Smoke-test the scraper against the public "github" profile.

    BUG FIX: the body read an unbound `username` while the parameter was
    `__A`, and the isinstance check compared against the username string
    instead of `dict`.  NOTE(review): function name reconstructed — nothing
    in the visible file calls it; confirm against the original module.
    """
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "support@github.com"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # BUG FIX: the instance was assigned to `a__` but printed as the unbound
    # name `instagram_user`.
    instagram_user = InstagramUser('github')
    print(instagram_user)
    print(f"""{instagram_user.number_of_posts = }""")
    print(f"""{instagram_user.number_of_followers = }""")
    print(f"""{instagram_user.number_of_followings = }""")
    print(f"""{instagram_user.email = }""")
    print(f"""{instagram_user.website = }""")
    print(f"""{instagram_user.profile_picture_url = }""")
    print(f"""{instagram_user.is_verified = }""")
    print(f"""{instagram_user.is_private = }""")
| 80 | 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a__ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def _preprocess_image(image):
    """Deprecated image preprocessing: PIL image(s) or tensor -> float tensor in [-1, 1].

    Tensors pass through untouched; PIL images are resized to a multiple of 8,
    stacked, scaled to [-1, 1] and converted to NCHW torch tensors.
    BUG FIX: the broken block passed the image as the warning *category* and
    never bound `image`/`w`/`h`.
    """
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask):
    """Deprecated mask preprocessing: PIL mask(s) or tensor -> binary float tensor.

    Tensors pass through untouched; PIL masks are converted to grayscale,
    resized to a multiple of 32, stacked, and thresholded at 0.5 to {0, 1}.
    BUG FIX: the broken block returned/read the unbound name `mask`.
    """
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # Binarize: below 0.5 becomes 0 (hole), the rest becomes 1 (keep).
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    """Inpainting pipeline using the RePaint resampling schedule.

    NOTE(review): reconstructed from an obfuscated block whose `__init__` and
    `__call__` had duplicate parameter names (a SyntaxError) and whose locals
    were all discarded; names follow the imported diffusers API
    (DiffusionPipeline, RePaintScheduler, UNetaDModel).
    """

    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator=None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run RePaint: keep pixels where the mask is 1, resample the rest."""
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 80 |
'''simple docstring'''
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: pass the unknown quantity as 0, get it back in a dict.

    Exactly one of the three arguments must be 0; the missing value is
    computed from the other two.  BUG FIX: the obfuscated signature declared
    `__A` three times (a SyntaxError) while the body used the real names.

    Raises:
        ValueError: unless exactly one argument is 0, or if resistance < 0.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 80 | 1 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
a__ : str = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    """Composite config holding an image-encoder and a text-decoder sub-config.

    NOTE(review): reconstructed from an obfuscated block whose two class
    attributes collided on one name and whose locals were all discarded; the
    `model_type` string and error message are taken verbatim from the source.
    """

    model_type = 'vision-encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuraton of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")
        # Rebuild the concrete sub-configs from their serialized dicts.
        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        """Build a composite config from two instantiated sub-configs."""
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    """ONNX export config for the vision encoder (pixel_values in, hidden states out).

    NOTE(review): the three properties all collided on `__a` in the obfuscated
    block; names restored per the OnnxConfig API. Class name is grounded by
    the `get_encoder_config` call below.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        # Dynamic axes for the image batch.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def outputs(self):
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    """ONNX export config for the text decoder fed by encoder hidden states.

    NOTE(review): reconstructed — the obfuscated block discarded every local
    into one name. Class name grounded by the `get_decoder_config` call below.
    """

    @property
    def inputs(self):
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None):
        """Build dummy decoder inputs plus a zero encoder-hidden-states tensor."""
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)
        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    """Dispatcher that hands out the encoder/decoder ONNX configs.

    NOTE(review): method names restored per the OnnxConfig API; the obfuscated
    block collided all three on `__a` and dropped the hidden-size propagation.
    """

    @property
    def inputs(self):
        # The composite model is exported in two parts; no direct inputs here.
        pass

    def get_encoder_config(self, encoder_config):
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config, decoder_config, feature="default"):
        # The decoder export needs to know the encoder's hidden size.
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 80 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the MBart subpackage.  BUG FIX: the obfuscated
# block assigned every piece to the single name `a__`, so `_import_structure`
# was unbound and the lazy module was never installed in sys.modules.
_import_structure = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ['MBartTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ['MBartTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MBartForCausalLM',
        'MBartForConditionalGeneration',
        'MBartForQuestionAnswering',
        'MBartForSequenceClassification',
        'MBartModel',
        'MBartPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        'TFMBartForConditionalGeneration',
        'TFMBartModel',
        'TFMBartPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        'FlaxMBartForConditionalGeneration',
        'FlaxMBartForQuestionAnswering',
        'FlaxMBartForSequenceClassification',
        'FlaxMBartModel',
        'FlaxMBartPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports symbols on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 80 | 1 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    # Fallback flag so the version check below can still run without torch.
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)
# Hub repo hosting the default font used by render_text.
DEFAULT_FONT_PATH = 'ybelkada/fonts'
def _check_torch_version():
    """Raise if torch is installed but older than the 1.11 this processor needs.

    Name grounded by the call sites in `torch_extract_patches` and
    `extract_flattened_patches`.
    """
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Cut a CHW image tensor into non-overlapping patches.

    Returns a tensor of shape (1, rows, columns, C * patch_height * patch_width).
    BUG FIX: the obfuscated signature declared `__A` three times (SyntaxError)
    and discarded every intermediate; name grounded by the call in
    `extract_flattened_patches`.
    """
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    # unfold with stride == kernel gives non-overlapping patches.
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    """Render `text` (wrapped at 80 chars) onto a padded PIL image.

    Font resolution order: raw `font_bytes`, then `font_path`, then the
    default Arial.TTF downloaded from the hub.  BUG FIX: the obfuscated
    signature repeated `__A` (SyntaxError) and never bound its locals; name
    grounded by the call in `render_header`.
    """
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the rendered width and height.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    """Render `header` text above `image` and return the combined numpy array.

    Both parts are scaled to a common width.  BUG FIX: the obfuscated
    signature repeated `__A` (SyntaxError) and dropped every local; name
    grounded by the call in `Pix2StructImageProcessor.preprocess`.
    """
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary.
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)

    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary.
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    """Image processor that flattens images into patch sequences for Pix2Struct.

    NOTE(review): reconstructed from an obfuscated block with duplicate
    parameter names (SyntaxError) and discarded locals; the class name is
    grounded by the error message in `_check_torch_version`, and
    `extract_flattened_patches` / `normalize` by their internal call sites.
    """

    model_input_names = ['flattened_patches']

    def __init__(self, do_convert_rgb=True, do_normalize=True, patch_size=None, max_patches=2048, is_vqa=False, **kwargs):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa

    def extract_flattened_patches(self, image, max_patches, patch_size, **kwargs):
        """Resize `image` to the best patch grid and flatten it to
        (max_patches, 2 + C * patch_h * patch_w), prefixed with 1-based
        row/column ids and zero-padded to `max_patches`."""
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the scaled image fits within max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result

    def normalize(self, image, data_format=None, **kwargs):
        """Per-image standardization: (x - mean) / max(std, 1/sqrt(num_pixels))."""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        header_text=None,
        do_convert_rgb=None,
        do_normalize=None,
        max_patches=None,
        patch_size=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Full pipeline: optional RGB conversion, optional VQA header
        rendering, normalization, patch flattening, attention-mask creation."""
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)
            if isinstance(header_text, str):
                header_text = [header_text] * len(images)
            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy: a patch is real iff its row is non-zero
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )
        return encoded_outputs
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly (Strassen's base case).

    BUG FIX: the obfuscated signature declared `__A` twice (SyntaxError)
    while the body used `a` and `b`; name grounded by the call in
    `actual_strassen`.

    Raises:
        Exception: if either matrix is not 2x2.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally-shaped matrices.

    BUG FIX: duplicate `__A` parameters (SyntaxError); names restored from
    the body and the call sites in `actual_strassen`.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference of two equally-shaped matrices.

    BUG FIX: duplicate `__A` parameters (SyntaxError); names restored from
    the body and the call sites in `actual_strassen`.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split an even-sized square matrix into four quadrants.

    Returns (top_left, top_right, bot_left, bot_right).  BUG FIX: the body
    read `a` while the parameter was named `__A`; name grounded by the call
    in `actual_strassen`.

    Raises:
        Exception: for odd-sized matrices.
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (rows, columns) of `matrix`.

    BUG FIX: the body read `matrix` while the parameter was named `__A`;
    name grounded by the calls in `actual_strassen` and `strassen`.
    """
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Print one matrix row per line.

    BUG FIX: the obfuscated body printed `str(__A)` (the whole matrix) once
    per row instead of each row.
    """
    print("\n".join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication for power-of-two square matrices.

    Uses the seven Strassen products to multiply with ~O(n^2.8) scalar
    multiplications.  BUG FIX: duplicate `__A` parameters (SyntaxError) and
    every operand collapsed onto one name; reconstructed around the sibling
    helpers the broken body already called.
    """
    # Base case: fall back to the direct 2x2 product.
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # The seven Strassen products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrixa: list, matrixb: list) -> list:
    """Multiply two matrices via Strassen, zero-padding to a power-of-two size.

    BUG FIX: duplicate `__A` parameters (SyntaxError) and collapsed locals;
    name grounded by the `strassen(...)` call in the `__main__` block.

    Raises:
        Exception: if the inner dimensions do not match.
    """
    if matrix_dimensions(matrixa)[1] != matrix_dimensions(matrixb)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrixa}\n"
            f"Matrix B: {matrixb}"
        )
        raise Exception(msg)
    dimensiona = matrix_dimensions(matrixa)
    dimensionb = matrix_dimensions(matrixb)

    if dimensiona[0] == dimensiona[1] and dimensionb[0] == dimensionb[1]:
        return [matrixa, matrixb]

    # Pad both matrices with zeros up to the next power-of-two square size.
    maximum = max(*dimensiona, *dimensionb)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrixa = matrixa
    new_matrixb = matrixb

    # Adding zeros to the matrices so that the arrays dimensions are the same
    # and also a power of 2.
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1], maxim):
                new_matrixa[i].append(0)
        else:
            new_matrixa.append([0] * maxim)
        if i < dimensionb[0]:
            for _ in range(dimensionb[1], maxim):
                new_matrixb[i].append(0)
        else:
            new_matrixb.append([0] * maxim)

    final_matrix = actual_strassen(new_matrixa, new_matrixb)

    # Removing the additional zeros.
    for i in range(0, maxim):
        if i < dimensiona[0]:
            for _ in range(dimensionb[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # BUG FIX: both matrices were assigned to `a__`, leaving the names passed
    # to `strassen` unbound; the second operand was also lost entirely.
    matrixa = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrixb = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrixa, matrixb))
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> str:
'''simple docstring'''
return " ".join(
"".join(word[::-1] ) if len(__A ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
    # Run the doctests, then demo: words longer than 4 characters get reversed.
    import doctest
    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
| 80 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)

# PIL is only needed for the resampling enums; guard the optional dependency.
if is_vision_available():
    import PIL
class lowercase_(BaseImageProcessor):
    r"""
    CLIP-style image processor: resize to a target shortest edge, center crop,
    rescale to [0, 1], normalize with the OpenAI CLIP mean/std, and optionally
    convert inputs to RGB.

    NOTE(review): the obfuscated source bound every attribute and method to a
    single mangled name. Attribute and method names below are restored from the
    read sites inside ``preprocess`` (``self.do_resize``, ``self.resize``, ...);
    ``model_input_names`` and the ``preprocess`` name follow the upstream
    transformers implementation — confirm against the original file.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Defaults follow the published CLIP checkpoints (224px).
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* so its shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # ``resize`` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop *image* to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255 to map into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* channel-wise with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> "BatchFeature":
        """Run the full CLIP pipeline; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
# Characters that may legitimately appear in the decrypted plaintext.
VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
# Candidate key bytes: the cipher key is three lowercase ASCII letters.
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
# Frequent English words used to narrow down candidate decryptions.
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key(key: tuple[int, ...], ciphertext: list[int]) -> str | None:
    """XOR-decrypt *ciphertext* with the repeating *key*.

    Returns the decoded string, or ``None`` as soon as a decoded byte falls
    outside ``VALID_INTS`` (i.e. the key cannot be correct).
    """
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded
def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Return every decryption of *ciphertext* (over all 3-letter lowercase
    keys) whose characters are all valid plaintext characters."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(key, ciphertext)
        if encoded is not None:
            possibles.append(encoded)
    return possibles
def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    """Keep only the candidate plaintexts containing *common_word* (case-insensitive)."""
    return [possible for possible in possibles if common_word in possible.lower()]
def solution(filename: str = "p059_cipher.txt") -> int:
    """Project Euler 59: decrypt the cipher file and sum the ASCII codes of the plaintext.

    The candidate set is narrowed with increasingly specific common words
    until a single decryption remains.
    """
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
    # Print the decoded-text character sum for Project Euler problem 59.
    print(F"""{solution() = }""")
| 80 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for CLIP (slow and fast implementations).

    NOTE(review): in the obfuscated source every method was named ``__a`` and
    the ``self.*`` fixtures were never assigned; names are restored from the
    read sites (``self.special_tokens_map``, ``self.vocab_file``, ...) and the
    upstream transformers test file — confirm the test-method names upstream.
    """

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f''' {text}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 80 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    """A single element of the circular singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class CircularLinkedList:
    """Circular singly linked list with head and tail references.

    Restored from the obfuscated source (both classes were named
    ``lowercase_`` and no ``self.*`` attribute was ever assigned); names come
    from the read sites in the accompanying test function. Also fixes a latent
    bug: the tail pointer update in ``delete_nth`` was checked *after* the
    node had been unlinked, so ``len(self)`` had already shrunk and the tail
    was never updated on delete-at-tail.
    """

    def __init__(self) -> None:
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append *data* at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend *data* at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert *data* before position *index* (0 <= index <= len)."""
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the element at *index* (0 <= index < len)."""
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            if index == len(self) - 1:  # delete at tail
                # Must be checked BEFORE unlinking: the unlink shrinks len(self).
                self.tail = temp
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Smoke-test every CircularLinkedList operation, including error paths."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True

    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 80 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Matches English articles for answer normalization (see normalize_answer).
ARTICLES_REGEX = re.compile(R'\b(a|an|the)\b', re.UNICODE)
# Parsed CLI options; populated in the __main__ guard and read by main() and helpers.
OPTS = None
def parse_args():
    """Build and parse the CLI arguments of the official SQuAD v2.0 evaluation script.

    Prints the help text and exits with status 1 when invoked without arguments.
    """
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help="Predict \"\" if no-answer probability exceeds this (default = 1.0).",
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to True if it has at least one gold answer text."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Tokenize *s* by whitespace after SQuAD answer normalization; [] for empty input."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """Exact-match score (0 or 1) between normalized gold and predicted answers."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 between normalized gold and predicted answers."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Return (exact_scores, f1_scores) dicts keyed by question id.

    For each question, scores are the max over all gold answers; unanswerable
    questions are scored against the empty string.
    """
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Override per-question scores where the no-answer probability exceeds the threshold.

    Such questions are treated as predicted-unanswerable: they score 1.0 only
    when they truly have no answer.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into percentage metrics.

    When *qid_list* is given, only those question ids are averaged.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every metric from *new_eval* into *main_eval* under ``{prefix}_{key}``."""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Render a step-style precision-recall curve and save it to *out_image*.

    ``plt`` is imported in the __main__ guard only when --out-image-dir is set.
    """
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Compute average precision (x100) by sweeping the no-answer threshold.

    Optionally plots the precision-recall curve when *out_image* is given.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute PR curves for exact match, F1 and the oracle task; merge APs into *main_eval*."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    # Oracle: score 1.0 exactly on the answerable questions.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question ids."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Normalize bar heights so they sum to 1 (proportion of the dataset).
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer probability threshold maximizing the aggregate score.

    Returns ``(best_score_percent, best_thresh)``.
    """
    # Baseline: predict no-answer for everything.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record the best achievable exact/F1 scores and their thresholds in *main_eval*."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """Load data and predictions, compute SQuAD v2.0 metrics, write or print them."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    # OPTS is read by main() and the plotting helpers.
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        # Use a non-interactive backend so plots can be written without a display.
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
    main()
| 80 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
a__ : int = re.compile(R'([A-Z]+)([A-Z][a-z])')
a__ : str = re.compile(R'([a-z\d])([A-Z])')
a__ : Tuple = re.compile(R'(?<!_)_(?!_)')
a__ : Union[str, Any] = re.compile(R'(_{2,})')
a__ : Dict = R'^\w+(\.\w+)*$'
a__ : Optional[Any] = R'<>:/\|?*'
def camelcase_to_snakecase(name):
    """Convert a CamelCase string to snake_case."""
    name = _uppercase_uppercase_re.sub(R"\1_\2", name)
    name = _lowercase_uppercase_re.sub(R"\1_\2", name)
    return name.lower()
def snakecase_to_camelcase(name):
    """Convert a snake_case string to CamelCase, preserving doubled underscores."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")
def filename_prefix_for_name(name):
    """Return the snake_case file prefix for a dataset *name* (must not be a path)."""
    if os.path.basename(name) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''')
    return camelcase_to_snakecase(name)
def filename_prefix_for_split(name, split):
    """Return the ``{snake_name}-{split}`` file prefix, validating both parts."""
    if os.path.basename(name) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''')
    if not re.match(_split_re, split):
        raise ValueError(f'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''')
    return f'''{filename_prefix_for_name(name)}-{split}'''
def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching every shard file of a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir, prefix)
    return f'''{filepath}*'''
def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the concrete shard filenames for a dataset split.

    With *shard_lengths*, names carry ``-{shard:05d}-of-{total:05d}`` suffixes;
    otherwise a single filename is returned.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f'''.{filetype_suffix}'''
        return [filename]
| 80 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a__ : Optional[List[str]] = None
a__ : Dict = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a__ : Any = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class lowercase_:
    """Image feature: stores ``{"bytes", "path"}`` structs and decodes to PIL images.

    NOTE(review): the obfuscated source bound every field and method to a
    single mangled name and its fields lacked the annotations a dataclass
    requires; names are restored from the read sites (``self.decode``,
    ``self.pa_type``) and the upstream `datasets` Image feature — confirm the
    method names (`encode_example`, `decode_example`, ...) against upstream.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image', init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode *value* (str path, bytes, dict, ndarray or PIL image) into a bytes/path struct."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''')

    def decode_example(self, value, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a bytes/path struct into a PIL image, reading local or hub-hosted files."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''')
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self):
        """Return self when decodable, otherwise the flat bytes/path value features."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage) -> "pa.StructArray":
        """Cast string/binary/struct/list arrow storage to the bytes/path struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # Nested lists are treated as image arrays and re-encoded to bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage) -> "pa.StructArray":
        """Embed referenced image files as bytes, keeping only the file basename as path."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def _UpperCamelCase ( ) -> List[str]:
    '''Return the image formats Pillow can both open and save, caching the result
    in the module-level ``_IMAGE_COMPRESSION_FORMATS``.

    Fix: the computed list was previously bound to a throwaway local instead of
    the global, so the cache was never populated and ``None`` could be returned.
    '''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        # Only formats with both an opener and a saver registered are usable here.
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def _UpperCamelCase ( image ) -> bytes:
    '''Serialize a PIL image to bytes, keeping its original format when losslessly
    re-savable, otherwise falling back to PNG (standard modes) or TIFF.

    Fix: the parameter was ``__A`` while the body referenced ``image`` and
    ``buffer``, both unbound.
    '''
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        fmt = image.format
    else:
        # Modes PNG can represent keep PNG; exotic modes (e.g. I;16, F) need TIFF.
        fmt = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer , format=fmt )
    return buffer.getvalue()
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
if hasattr(__A , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(__A )}
def _UpperCamelCase ( array ) -> dict:
    '''Encode a numpy array as an Image-feature dict by converting it to a PIL
    image (downcasting the dtype to a Pillow-compatible one when needed).

    Fix: the parameter was ``__A`` while the body referenced ``array``, ``dtype``,
    ``dtype_kind``, ``dest_dtype`` etc., all unbound under the mangled names.
    '''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def _UpperCamelCase ( objs ) -> List[dict]:
    '''Encode a list of heterogeneous image objects (paths, numpy arrays, or PIL
    images) as a list of Image-feature dicts, keyed off the first non-null value.

    Fixes: the parameter was ``__A`` while the body referenced ``objs``, and the
    type check was garbled to ``isinstance(objs, objs)`` (TypeError at runtime).
    NOTE(review): ``encode_np_array`` / ``encode_pil_image`` are the helpers
    defined above; their def-names were mangled in this file — confirm the
    restored names against the originals.
    '''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'." )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
| 80 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( a__ , unittest.TestCase ):
    # Fast CPU smoke tests for DDIMPipeline (tiny UNet, 2 inference steps).
    # NOTE(review): this file appears machine-mangled — every method below is named
    # `__a`, so only the LAST def survives on the class (earlier ones are shadowed);
    # `def __a ( self , a , a=0 )` repeats parameter name `a` (a SyntaxError); and
    # bodies reference names (`unet`, `scheduler`, `generator`, `pipe`, `image`,
    # `image_slice`, `max_diff`) that the mangled `UpperCamelCase__ =` assignments
    # never bind. Original method/variable names must be restored before running.
    __UpperCAmelCase = DDIMPipeline
    __UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'latents',
        'callback',
        'callback_steps',
    }
    __UpperCAmelCase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    __UpperCAmelCase = False
    def __a ( self ):
        # Build the tiny 32x32 UNet + DDIM scheduler used by all fast tests.
        torch.manual_seed(0 )
        UpperCamelCase__ = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        UpperCamelCase__ = DDIMScheduler()
        UpperCamelCase__ = {"unet": unet, "scheduler": scheduler}
        return components
    def __a ( self , a , a=0 ):
        # Deterministic generator + minimal pipeline kwargs for a device/seed pair.
        if str(a ).startswith("mps" ):
            UpperCamelCase__ = torch.manual_seed(a )
        else:
            UpperCamelCase__ = torch.Generator(device=a ).manual_seed(a )
        UpperCamelCase__ = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def __a ( self ):
        # End-to-end CPU generation; compares a 3x3 corner slice to pinned values.
        UpperCamelCase__ = "cpu"
        UpperCamelCase__ = self.get_dummy_components()
        UpperCamelCase__ = self.pipeline_class(**a )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        UpperCamelCase__ = self.get_dummy_inputs(a )
        UpperCamelCase__ = pipe(**a ).images
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        UpperCamelCase__ = np.array(
            [1.0_00e00, 5.7_17e-01, 4.7_17e-01, 1.0_00e00, 0.0_00e00, 1.0_00e00, 3.0_00e-04, 0.0_00e00, 9.0_00e-04] )
        UpperCamelCase__ = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a , 1e-3 )
    # The four overrides below only relax the numeric tolerances of the mixin tests.
    def __a ( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def __a ( self ):
        super().test_save_load_local(expected_max_difference=3e-3 )
    def __a ( self ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def __a ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    # Slow GPU integration tests against pretrained google/ddpm checkpoints.
    # NOTE(review): same mangling as above — both methods are named `__a` (the
    # second shadows the first) and bodies reference unbound names (`unet`,
    # `scheduler`, `ddim`, `ddpm`, `generator`, `image`, `image_slice`,
    # `expected_slice`). Pinned slices encode expected outputs for seed 0.
    def __a ( self ):
        # CIFAR-10 32x32 checkpoint, deterministic DDIM sampling (eta=0).
        UpperCamelCase__ = "google/ddpm-cifar10-32"
        UpperCamelCase__ = UNetaDModel.from_pretrained(a )
        UpperCamelCase__ = DDIMScheduler()
        UpperCamelCase__ = DDIMPipeline(unet=a , scheduler=a )
        ddim.to(a )
        ddim.set_progress_bar_config(disable=a )
        UpperCamelCase__ = torch.manual_seed(0 )
        UpperCamelCase__ = ddim(generator=a , eta=0.0 , output_type="numpy" ).images
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCamelCase__ = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __a ( self ):
        # EMA bedroom 256x256 checkpoint with its own pretrained scheduler config.
        UpperCamelCase__ = "google/ddpm-ema-bedroom-256"
        UpperCamelCase__ = UNetaDModel.from_pretrained(a )
        UpperCamelCase__ = DDIMScheduler.from_pretrained(a )
        UpperCamelCase__ = DDIMPipeline(unet=a , scheduler=a )
        ddpm.to(a )
        ddpm.set_progress_bar_config(disable=a )
        UpperCamelCase__ = torch.manual_seed(0 )
        UpperCamelCase__ = ddpm(generator=a , output_type="numpy" ).images
        UpperCamelCase__ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        UpperCamelCase__ = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , __A , __A , __A ) , minimax(depth + 1 , node_index * 2 + 1 , __A , __A , __A ) , )
)
def _UpperCamelCase ( ) -> None:
    '''Demo entry point: print the minimax-optimal value of a sample game tree.

    Fix: the locals were mangled so the call passed undefined names. NOTE(review):
    `minimax` is the function defined above, whose def-name was also mangled to
    `_UpperCamelCase` in this file — restore the original name file-wide.
    '''
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print(F'''Optimal value : {minimax(0 , 0 , True , scores , height )}''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined under that name in this file (the def
    # above was mangled to `_UpperCamelCase`), so this call raises NameError.
    main()
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( num_rows ) -> None:
    '''Pretty-print `num_rows` rows of Pascal's triangle, centred with spaces.

    Fix: the parameter was ``__A`` while the body referenced ``num_rows``.
    NOTE(review): `generate_pascal_triangle` is the generator defined below,
    whose def-name was mangled in this file.
    '''
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=" " )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=" " )
            else:
                print(triangle[row_idx][col_idx] , end="" )
        print()
def _UpperCamelCase ( __A ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(__A , __A ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
UpperCamelCase__ = []
for current_row_idx in range(__A ):
UpperCamelCase__ = populate_current_row(__A , __A )
triangle.append(__A )
return triangle
def _UpperCamelCase ( __A , __A ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
UpperCamelCase__ , UpperCamelCase__ = 1, 1
for current_col_idx in range(1 , __A ):
calculate_current_element(
__A , __A , __A , __A )
return current_row
def _UpperCamelCase ( __A , __A , __A , __A , ) -> None:
'''simple docstring'''
UpperCamelCase__ = triangle[current_row_idx - 1][current_col_idx - 1]
UpperCamelCase__ = triangle[current_row_idx - 1][current_col_idx]
UpperCamelCase__ = above_to_left_elt + above_to_right_elt
def _UpperCamelCase ( __A ) -> list[list[int]]:
'''simple docstring'''
if not isinstance(__A , __A ):
raise TypeError("The input value of 'num_rows' should be 'int'" )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"The input value of 'num_rows' should be greater than or equal to 0" )
UpperCamelCase__ = [[1]]
for row_index in range(1 , __A ):
UpperCamelCase__ = [0] + result[-1] + [0]
UpperCamelCase__ = row_index + 1
# Calculate the number of distinct elements in a row
UpperCamelCase__ = sum(divmod(__A , 2 ) )
UpperCamelCase__ = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
UpperCamelCase__ = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
UpperCamelCase__ = row_first_half + row_second_half
result.append(__A )
return result
def _UpperCamelCase ( ) -> None:
    '''Crude timing comparison of the two Pascal-triangle generators via timeit.

    Fixes: mangled locals restored; the unused `Callable` import was dropped.
    NOTE(review): `generate_pascal_triangle` / `generate_pascal_triangle_optimized`
    are the two generators above, whose def-names were mangled in this file.
    '''
    from timeit import timeit

    def benchmark_a_function(func , value ) -> None:
        # Time `func(value)` by name through the __main__ namespace.
        call = F'''{func.__name__}({value})'''
        timing = timeit(F'''__main__.{call}''' , setup="import __main__" )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F'''{call:38} -- {timing:.4f} seconds''' )

    for value in range(15 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `benchmark` is not defined under that name here (the def above
    # was mangled to `_UpperCamelCase`), so this call raises NameError.
    benchmark()
| 80 |
'''simple docstring'''
def _UpperCamelCase ( __A ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _UpperCamelCase ( __A = 100 ) -> int:
'''simple docstring'''
UpperCamelCase__ = 1
UpperCamelCase__ = 2
for i in range(2 , max_n + 1 ):
UpperCamelCase__ = pre_numerator
UpperCamelCase__ = 2 * i // 3 if i % 3 == 0 else 1
UpperCamelCase__ = cur_numerator
UpperCamelCase__ = e_cont * pre_numerator + temp
return sum_digits(__A )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this file (the def
    # above was mangled to `_UpperCamelCase`), so this raises NameError as written.
    print(F"""{solution() = }""")
| 80 | 1 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowercase_ ( a__ , a__ ):
    # Timm-backed backbone wrapper exposing timm feature extractors through the
    # transformers Backbone API.
    # NOTE(review): this class is machine-mangled and cannot run as written:
    # - the base list `( a__ , a__ )` repeats the same base class (TypeError);
    # - `def __init__( self , a , **a )` and `def __a ( cls , a , *a , **a )`
    #   repeat parameter name `a` (SyntaxError);
    # - three methods are all named `__a`, so only the last def survives;
    # - bodies reference names (`config`, `pretrained`, `use_timm`, `out_indices`,
    #   `hidden_states`, `feature_maps`, `output`) the mangled assignments never
    #   bind. Original names must be restored before use.
    __UpperCAmelCase = 'pixel_values'
    __UpperCAmelCase = False
    __UpperCAmelCase = TimmBackboneConfig
    def __init__( self , a , **a ):
        # Validates the config, creates the timm model, and registers feature layers.
        requires_backends(self , "timm" )
        super().__init__(a )
        UpperCamelCase__ = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
        if config.backbone not in timm.list_models():
            raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' )
        if hasattr(a , "out_features" ) and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
        UpperCamelCase__ = getattr(a , "use_pretrained_backbone" , a )
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
        # We just take the final layer by default. This matches the default for the transformers models.
        UpperCamelCase__ = config.out_indices if getattr(a , "out_indices" , a ) is not None else (-1,)
        UpperCamelCase__ = timm.create_model(
            config.backbone , pretrained=a , features_only=config.features_only , in_chans=config.num_channels , out_indices=a , **a , )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        UpperCamelCase__ = self._backbone.return_layers
        UpperCamelCase__ = {layer["module"]: str(a ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(a )
    @classmethod
    def __a ( cls , a , *a , **a ):
        # Alternate constructor: builds a TimmBackboneConfig from kwargs, then
        # delegates to _from_config.
        requires_backends(cls , ["vision", "timm"] )
        from ...models.timm_backbone import TimmBackboneConfig
        UpperCamelCase__ = kwargs.pop("config" , TimmBackboneConfig() )
        UpperCamelCase__ = kwargs.pop("use_timm_backbone" , a )
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones" )
        UpperCamelCase__ = kwargs.pop("num_channels" , config.num_channels )
        UpperCamelCase__ = kwargs.pop("features_only" , config.features_only )
        UpperCamelCase__ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
        UpperCamelCase__ = kwargs.pop("out_indices" , config.out_indices )
        UpperCamelCase__ = TimmBackboneConfig(
            backbone=a , num_channels=a , features_only=a , use_pretrained_backbone=a , out_indices=a , )
        return super()._from_config(a , **a )
    def __a ( self , a ):
        # Weight init is handled by timm; intentionally a no-op here.
        pass
    def __a ( self , a , a=None , a=None , a=None , **a ):
        # Forward pass: runs the timm backbone and repackages the selected feature
        # maps (and optionally all hidden states) as a BackboneOutput.
        UpperCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
        UpperCamelCase__ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        UpperCamelCase__ = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            UpperCamelCase__ = self._all_layers
            UpperCamelCase__ = self._backbone(a , **a )
            UpperCamelCase__ = self._return_layers
            UpperCamelCase__ = tuple(hidden_states[i] for i in self.out_indices )
        else:
            UpperCamelCase__ = self._backbone(a , **a )
            UpperCamelCase__ = None
            UpperCamelCase__ = tuple(a )
            UpperCamelCase__ = tuple(a ) if hidden_states is not None else None
        if not return_dict:
            UpperCamelCase__ = (feature_maps,)
            if output_hidden_states:
                UpperCamelCase__ = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=a , hidden_states=a , attentions=a )
| 80 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _UpperCamelCase ( __A , __A , __A , __A , __A ) -> List[str]:
    '''Convert an original mLUKE checkpoint to the transformers LukeForMaskedLM
    format, extend the vocabularies with <ent>/<ent2> and [MASK2], verify the
    converted model against pinned outputs, and save model + tokenizer.

    NOTE(review): this function is machine-mangled and cannot run as written —
    the signature declares five parameters all named ``__A`` (duplicate-argument
    SyntaxError; originally checkpoint_path, metadata_path, entity_vocab_path,
    pytorch_dump_folder_path, model_size), and the ``UpperCamelCase__ =``
    assignments never bind the names the body reads (``metadata``, ``config``,
    ``state_dict``, ``tokenizer``, ``entity_vocab``, ``model``, ``outputs`` ...).
    Original variable names must be restored before use.
    '''
    with open(__A ) as metadata_file:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = LukeConfig(use_entity_aware_attention=__A , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    UpperCamelCase__ = torch.load(__A , map_location="cpu" )["module"]
    # Load the entity vocab file
    UpperCamelCase__ = load_original_entity_vocab(__A )
    # add an entry for [MASK2]
    UpperCamelCase__ = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    UpperCamelCase__ = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    UpperCamelCase__ = AddedToken("<ent>" , lstrip=__A , rstrip=__A )
    UpperCamelCase__ = AddedToken("<ent2>" , lstrip=__A , rstrip=__A )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' )
    tokenizer.save_pretrained(__A )
    # Rewrite tokenizer_config.json so the saved tokenizer loads as MLukeTokenizer.
    with open(os.path.join(__A , "tokenizer_config.json" ) , "r" ) as f:
        UpperCamelCase__ = json.load(__A )
    UpperCamelCase__ = "MLukeTokenizer"
    with open(os.path.join(__A , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(__A , __A )
    with open(os.path.join(__A , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(__A , __A )
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    # Initialize the embeddings of the special tokens
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["@"] )[0]
    UpperCamelCase__ = tokenizer.convert_tokens_to_ids(["#"] )[0]
    UpperCamelCase__ = state_dict["embeddings.word_embeddings.weight"]
    UpperCamelCase__ = word_emb[ent_init_index].unsqueeze(0 )
    UpperCamelCase__ = word_emb[enta_init_index].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        UpperCamelCase__ = state_dict[bias_name]
        UpperCamelCase__ = decoder_bias[ent_init_index].unsqueeze(0 )
        UpperCamelCase__ = decoder_bias[enta_init_index].unsqueeze(0 )
        UpperCamelCase__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            UpperCamelCase__ = F'''encoder.layer.{layer_index}.attention.self.'''
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
            UpperCamelCase__ = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    UpperCamelCase__ = state_dict["entity_embeddings.entity_embeddings.weight"]
    UpperCamelCase__ = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    UpperCamelCase__ = state_dict["entity_predictions.bias"]
    UpperCamelCase__ = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    UpperCamelCase__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
    UpperCamelCase__ = LukeForMaskedLM(config=__A ).eval()
    # Tied decoder weights are re-derived by tie_weights() below, so drop them here.
    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    UpperCamelCase__ = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            UpperCamelCase__ = state_dict[key]
        else:
            UpperCamelCase__ = state_dict[key]
    UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(__A , strict=__A )
    if set(__A ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'''Unexpected unexpected_keys: {unexpected_keys}''' )
    if set(__A ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'''Unexpected missing_keys: {missing_keys}''' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A , task="entity_classification" )
    UpperCamelCase__ = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    UpperCamelCase__ = (0, 9)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        UpperCamelCase__ = torch.Size((1, 33, 768) )
        UpperCamelCase__ = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else: # base
        UpperCamelCase__ = torch.Size((1, 1, 768) )
        UpperCamelCase__ = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
            F''' {expected_shape}''' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __A , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    UpperCamelCase__ = MLukeTokenizer.from_pretrained(__A )
    UpperCamelCase__ = "Tokyo is the capital of <mask>."
    UpperCamelCase__ = (24, 30)
    UpperCamelCase__ = tokenizer(__A , entity_spans=[span] , return_tensors="pt" )
    UpperCamelCase__ = model(**__A )
    UpperCamelCase__ = encoding["input_ids"][0].tolist()
    UpperCamelCase__ = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    UpperCamelCase__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(__A )
    UpperCamelCase__ = outputs.entity_logits[0][0].argmax().item()
    UpperCamelCase__ = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(__A ) )
    model.save_pretrained(__A )
def _UpperCamelCase ( __A ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = ["[MASK]", "[PAD]", "[UNK]"]
UpperCamelCase__ = [json.loads(__A ) for line in open(__A )]
UpperCamelCase__ = {}
for entry in data:
UpperCamelCase__ = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
UpperCamelCase__ = entity_id
break
UpperCamelCase__ = F'''{language}:{entity_name}'''
UpperCamelCase__ = entity_id
return new_mapping
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion.
    # NOTE(review): mangled — the parser is bound to `a__` but used as `parser`,
    # parse_args() is bound to `a__` but read as `args`, and
    # `convert_luke_checkpoint` is not defined under that name in this file
    # (the def above was renamed to `_UpperCamelCase`). NameErrors as written.
    a__ : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    a__ : Any = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 80 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
a__ : Union[str, Any] = logging.get_logger(__name__)
class lowercase_ ( a__ ):
    # Image processor: resize -> center-crop -> rescale -> BGR flip, plus a
    # semantic-segmentation post-processing helper.
    # NOTE(review): machine-mangled and not runnable as written — `__init__` and
    # every `__a` method repeat the parameter name `a` (SyntaxError), the five
    # `__a` methods shadow each other (only the last def survives), and bodies
    # reference names (`size`, `crop_size`, `do_resize`, `images`, `logits`, ...)
    # that the mangled assignments never bind. Original names must be restored.
    __UpperCAmelCase = ['pixel_values']
    def __init__( self , a = True , a = None , a = PILImageResampling.BILINEAR , a = True , a = 1 / 2_55 , a = True , a = None , a = True , **a , ):
        # Stores the default preprocessing configuration.
        super().__init__(**a )
        UpperCamelCase__ = size if size is not None else {"shortest_edge": 2_24}
        UpperCamelCase__ = get_size_dict(a , default_to_square=a )
        UpperCamelCase__ = crop_size if crop_size is not None else {"height": 2_56, "width": 2_56}
        UpperCamelCase__ = get_size_dict(a , param_name="crop_size" )
        UpperCamelCase__ = do_resize
        UpperCamelCase__ = size
        UpperCamelCase__ = resample
        UpperCamelCase__ = do_rescale
        UpperCamelCase__ = rescale_factor
        UpperCamelCase__ = do_center_crop
        UpperCamelCase__ = crop_size
        UpperCamelCase__ = do_flip_channel_order
    def __a ( self , a , a , a = PIL.Image.BILINEAR , a = None , **a , ):
        # Resize so the shortest edge matches size["shortest_edge"].
        UpperCamelCase__ = get_size_dict(a , default_to_square=a )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
        UpperCamelCase__ = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
        return resize(a , size=a , resample=a , data_format=a , **a )
    def __a ( self , a , a , a = None , **a , ):
        # Center-crop to size["height"] x size["width"].
        UpperCamelCase__ = get_size_dict(a )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
    def __a ( self , a , a , a = None , **a , ):
        # Multiply pixel values by the rescale factor (e.g. 1/255).
        return rescale(a , scale=a , data_format=a , **a )
    def __a ( self , a , a = None ):
        # Swap RGB -> BGR channel order.
        return flip_channel_order(a , data_format=a )
    def __a ( self , a , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ):
        # Full preprocessing pipeline; returns a BatchFeature of pixel_values.
        UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
        UpperCamelCase__ = resample if resample is not None else self.resample
        UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
        UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCamelCase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCamelCase__ = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        UpperCamelCase__ = size if size is not None else self.size
        UpperCamelCase__ = get_size_dict(a , default_to_square=a )
        UpperCamelCase__ = crop_size if crop_size is not None else self.crop_size
        UpperCamelCase__ = get_size_dict(a , param_name="crop_size" )
        UpperCamelCase__ = make_list_of_images(a )
        if not valid_images(a ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        # All transformations expect numpy arrays.
        UpperCamelCase__ = [to_numpy_array(a ) for image in images]
        if do_resize:
            UpperCamelCase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
        if do_center_crop:
            UpperCamelCase__ = [self.center_crop(image=a , size=a ) for image in images]
        if do_rescale:
            UpperCamelCase__ = [self.rescale(image=a , scale=a ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            UpperCamelCase__ = [self.flip_channel_order(image=a ) for image in images]
        UpperCamelCase__ = [to_channel_dimension_format(a , a ) for image in images]
        UpperCamelCase__ = {"pixel_values": images}
        return BatchFeature(data=a , tensor_type=a )
    def __a ( self , a , a = None ):
        # Convert model logits to per-image semantic segmentation maps, optionally
        # resized to the given target sizes.
        UpperCamelCase__ = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(a ) != len(a ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(a ):
                UpperCamelCase__ = target_sizes.numpy()
            UpperCamelCase__ = []
            for idx in range(len(a ) ):
                UpperCamelCase__ = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=a )
                UpperCamelCase__ = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(a )
        else:
            UpperCamelCase__ = logits.argmax(dim=1 )
            UpperCamelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 80 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
a__ : str = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class lowercase_ ( a__ ):
    """LiLT model configuration.

    Fixes vs. previous version: ``__init__`` declared seventeen parameters all
    named ``a`` (a duplicate-argument SyntaxError) while the body read the real
    names, and the attribute assignments never wrote to ``self``. Parameter
    names, defaults, and order are restored from the body's references.
    """
    __UpperCAmelCase = 'lilt'
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , channel_shrink_ratio=4 , max_ad_position_embeddings=10_24 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # Shrink factor applied to the layout (2D) channel relative to the text channel.
        self.channel_shrink_ratio = channel_shrink_ratio
        # NOTE(review): name was mangled from `max_2d_position_embeddings`; kept
        # as-is because the body references it under this spelling.
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 80 | 1 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
# Deprecation notice shown by every metric helper below (originally named
# DEPRECATION_WARNING before this file's identifiers were mangled).
a__ : str = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def _UpperCamelCase ( preds , labels ):
    '''Fraction of predictions equal to labels (simple accuracy).

    Fixes: the signature declared two parameters both named ``__A`` (duplicate-
    argument SyntaxError); the deprecation warning now references ``a__``, this
    module's warning text, and the backend check passes this function. The wrong
    ``-> List[str]`` annotation (List is never imported here) was dropped.
    '''
    warnings.warn(a__ , FutureWarning )
    requires_backends(_UpperCamelCase , "sklearn" )
    return (preds == labels).mean()
def _UpperCamelCase ( preds , labels ):
    '''Accuracy, F1, and their mean for a binary classification task.

    Fixes: duplicate ``__A`` parameters (SyntaxError) restored to real names;
    the accuracy helper is inlined because its def-name was mangled elsewhere
    in this file; the wrong ``-> Optional[int]`` annotation was dropped.
    NOTE(review): ``fa_score`` is this file's (mangled) import of sklearn's
    f1_score — confirm against the original import.
    '''
    warnings.warn(a__ , FutureWarning )
    requires_backends(_UpperCamelCase , "sklearn" )
    acc = (preds == labels).mean()
    fa = fa_score(y_true=labels , y_pred=preds )
    return {
        "acc": acc,
        "f1": fa,
        "acc_and_f1": (acc + fa) / 2,
    }
def _UpperCamelCase ( preds , labels ):
    '''Pearson and Spearman correlations (and their mean) for a regression task.

    Fixes: duplicate ``__A`` parameters (SyntaxError) restored to real names;
    locals rebound so the return expression's names exist; the wrong
    ``-> Optional[Any]`` annotation was dropped.
    '''
    warnings.warn(a__ , FutureWarning )
    requires_backends(_UpperCamelCase , "sklearn" )
    pearson_corr = pearsonr(preds , labels )[0]
    spearman_corr = spearmanr(preds , labels )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }
def glue_compute_metrics( task_name , preds , labels ):
    """Dispatch to the right GLUE metric for `task_name` (deprecated).

    Raises KeyError for an unknown task. The original declared all three
    parameters as `__A` (a SyntaxError); names are restored from the body.
    """
    warnings.warn(a__ , FutureWarning )  # module-level deprecation notice
    requires_backends(glue_compute_metrics , "sklearn" )
    assert len(preds ) == len(labels ), f'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}'''
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels , preds )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "mrpc":
        return acc_and_fa(preds , labels )
    elif task_name == "sts-b":
        return pearson_and_spearman(preds , labels )
    elif task_name == "qqp":
        return acc_and_fa(preds , labels )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds , labels )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds , labels )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds , labels )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
def xnli_compute_metrics( task_name , preds , labels ):
    """Compute the XNLI metric (accuracy); deprecated in favour of 🤗 Evaluate.

    Raises ValueError on mismatched lengths and KeyError for unknown tasks.
    """
    warnings.warn(a__ , FutureWarning )  # module-level deprecation notice
    requires_backends(xnli_compute_metrics , "sklearn" )
    if len(preds ) != len(labels ):
        raise ValueError(f'''Predictions and labels have mismatched lengths {len(preds )} and {len(labels )}''' )
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds , labels )}
    else:
        raise KeyError(task_name )
| 80 |
'''Project Euler 92: square-digit chains (count numbers below ten million whose chain arrives at 89).'''
# Pre-computed sum of squared digits for every 5-digit chunk.
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def next_number( number ):
    '''Return the sum of the squares of the digits of `number`.'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
a__ : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
a__ : Optional[Any] = True
a__ : Optional[Any] = False
def chain( number ):
    '''Return True if the chain starting at `number` ends at 1, False if at 89.

    Memoises the result in CHAINS for `number` and its ×10 multiples.
    Renamed from a garbled identifier: the body (and `solution`) already
    call it recursively as `chain`.
    '''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    # Every multiple of 10 shares the same digit-square chain.
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number = 10000000 ):
    '''Count how many starting values below `number` have chains arriving at 89.

    Chains ending at 89 are memoised as False in CHAINS, so the answer is the
    count of False entries (the garbled original counted its own argument).
    '''
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(F"""{solution() = }""")
| 80 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
a__ : Any = logging.get_logger(__name__)
a__ : List[str] = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class lowercase_ ( PretrainedConfig ):
    """Configuration class for Longformer models.

    The garbled original subclassed `a__` (a URL dict, not a class) and declared
    every `__init__` parameter as `a` (a SyntaxError); the base is the
    `PretrainedConfig` imported above and the names come from the body.
    """

    model_type = 'longformer'

    def __init__( self , attention_window = 5_12 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_05_22 , hidden_size = 7_68 , num_hidden_layers = 12 , num_attention_heads = 12 , intermediate_size = 30_72 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_12 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1e-12 , onnx_export = False , **kwargs , ):
        """Store the Longformer hyper-parameters; `attention_window` may be an int or per-layer list."""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class lowercase_ ( OnnxConfig ):
    """ONNX export configuration for Longformer.

    Method names are restored from the `super().outputs` /
    `super().generate_dummy_inputs` calls in the garbled original, whose
    methods were all named `__a` and shadowed each other.
    """

    def __init__( self , config , task = "default" , patching_specs = None ):
        super().__init__(config , task , patching_specs )
        # Switch the model into ONNX-friendly code paths during export.
        config.onnx_export = True

    @property
    def inputs( self ):
        """Dynamic-axis spec for the exported inputs (adds global_attention_mask)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ] )

    @property
    def outputs( self ):
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation( self ):
        return 1e-4

    @property
    def default_onnx_opset( self ):
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset , 14 )

    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        """Extend the base dummy inputs with a global attention mask."""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
| 80 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _UpperCamelCase ( __A , __A , __A=1024 , __A=1024 , __A=False , **__A ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = AutoTokenizer.from_pretrained(__A )
UpperCamelCase__ = SeqaSeqDataset(__A , __A , __A , __A , type_path="train" , **__A )
UpperCamelCase__ = tok.pad_token_id
def get_lens(__A ):
UpperCamelCase__ = tqdm(
DataLoader(__A , batch_size=512 , num_workers=8 , shuffle=__A , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
UpperCamelCase__ = []
for batch in dl:
UpperCamelCase__ = batch["input_ids"].ne(__A ).sum(1 ).tolist()
UpperCamelCase__ = batch["labels"].ne(__A ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(__A , __A ):
max_lens.append(max(__A , __A ) )
else:
max_lens.extend(__A )
return max_lens
UpperCamelCase__ = get_lens(__A )
UpperCamelCase__ = SeqaSeqDataset(__A , __A , __A , __A , type_path="val" , **__A )
UpperCamelCase__ = get_lens(__A )
pickle_save(__A , train_ds.len_file )
pickle_save(__A , val_ds.len_file )
if __name__ == "__main__":
    # CLI entry point: expose the length-caching helper via python-fire.
    # NOTE(review): this requires a module-level `save_len_file`; the function
    # above is defined under a different (garbled) name — confirm.
    fire.Fire(save_len_file)
| 80 | 1 |
'''simple docstring'''
def or_gate( input_1 , input_2 ):
    """Return 1 if either input is 1, else 0 (logical OR on 0/1 inputs).

    Renamed from a garbled identifier: the checks and the __main__ block in
    this module already call `or_gate`. The original declared both parameters
    as `__A` (a SyntaxError).
    """
    return int((input_1, input_2).count(1 ) != 0 )
def _UpperCamelCase ( ) -> None:
    """Check the OR gate against its full truth table."""
    truth_table = ((0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 1))
    for left, right, expected in truth_table:
        assert or_gate(left , right ) == expected
if __name__ == "__main__":
    # Print the OR truth table when run as a script.
    # NOTE(review): relies on a module-level `or_gate`; the gate above is
    # defined under a different (garbled) name — confirm.
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 80 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
# The garbled original rebound one name (`a__`) for all five constants below,
# so only the last survived; the tokenizer class references these exact names.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
        'junnyu/roformer_chinese_char_small': (
            'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_chinese_char_base': (
            'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_discriminator': (
            'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
        ),
        'junnyu/roformer_small_generator': (
            'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'junnyu/roformer_chinese_small': 1_5_3_6,
    'junnyu/roformer_chinese_base': 1_5_3_6,
    'junnyu/roformer_chinese_char_small': 5_1_2,
    'junnyu/roformer_chinese_char_base': 5_1_2,
    'junnyu/roformer_small_discriminator': 1_2_8,
    'junnyu/roformer_small_generator': 1_2_8,
}
PRETRAINED_INIT_CONFIGURATION = {
    'junnyu/roformer_chinese_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_base': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
    'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
    'junnyu/roformer_small_discriminator': {'do_lower_case': True},
    'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class lowercase_ ( PreTrainedTokenizerFast ):
    """Fast RoFormer tokenizer backed by the `tokenizers` library.

    Uses a Jieba-based custom pre-tokenizer at runtime and swaps back to
    `BertPreTokenizer` whenever the tokenizer must be (de)serialised, because
    custom pre-tokenizers cannot be pickled or saved. Garbled duplicate
    parameter names (SyntaxErrors) are restored from the bodies.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        # Re-create the backend normalizer if the saved state disagrees with
        # the requested lowercasing / accent-stripping behaviour.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def __getstate__( self ):
        # Custom (Jieba) pre-tokenizers cannot be pickled; fall back to BERT's.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """[CLS] A [SEP] (+ B [SEP] when a pair is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """All zeros for a single sequence; zeros then ones for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        # Saving requires a serialisable pre-tokenizer; swap in BERT's first.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 80 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for `StableDiffusionInpaintPipeline` built from tiny components.

    The garbled original listed the mixin bases as `a__` and named every
    method `__a` (shadowing each other); the bodies' `self.get_dummy_*`
    calls ground the restored names.
    """

    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )

    def get_dummy_components( self ):
        """Create tiny UNet/VAE/CLIP components so the pipeline runs quickly on CPU."""
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=False , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        """Deterministic prompt/image/mask inputs for `device`."""
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        # np.uint8 restored from the garbled `np.uinta` (no such NumPy attribute).
        init_image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert("RGB" ).resize((64, 64) )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint( self ):
        """End-to-end tiny inpaint run; checks output shape and a pixel slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    """Slow GPU integration tests for the SD-2 inpainting pipeline.

    Garbled method names (`__a`, which shadowed each other) and dangling `a`
    references are restored from the bodies: `torch_device` is imported above
    and `safety_checker`/`disable` defaults are None.
    """

    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline( self ):
        """fp32 run compared against a reference output image."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy" )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16( self ):
        """fp16 run with a looser tolerance."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.floataa , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def test_stable_diffusion_pipeline_pndm( self ):
        """fp16 run with CPU offload; asserts the peak VRAM stays bounded."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder="scheduler" )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.floataa , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 80 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
# The garbled original rebound one name (`a__`) for all constants below; the
# tokenizer class references these exact identifiers.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
        'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/esm2_t6_8M_UR50D': 1_0_2_4,
    'facebook/esm2_t12_35M_UR50D': 1_0_2_4,
}
def load_vocab_file( vocab_file ):
    """Read `vocab_file` and return its lines as a list of stripped tokens.

    Renamed from a garbled identifier: the tokenizer class below already calls
    `load_vocab_file`. The original never assigned the `lines` local it read.
    """
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class lowercase_ ( PreTrainedTokenizer ):
    """ESM protein tokenizer: one token per line of the vocab file.

    Garbled duplicate parameter names (SyntaxErrors) and the repeatedly
    rebound class attributes are restored; the `self.get_vocab_size` call in
    the `vocab_size` property grounds the method renames.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ):
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def _convert_id_to_token( self , index ):
        return self._id_to_token.get(index , self.unk_token )

    def _convert_token_to_id( self , token ):
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def _tokenize( self , text , **kwargs ):
        # ESM sequences are whitespace-separated residues; no subword merging.
        return text.split()

    def get_vocab_size( self , with_added_tokens=False ):
        return len(self._id_to_token )

    def get_vocab( self ):
        return {token: i for i, token in enumerate(self.all_tokens )}

    def token_to_id( self , token ):
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def id_to_token( self , index ):
        return self._id_to_token.get(index , self.unk_token )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """<cls> A <eos> (+ B <eos> when a pair is given)."""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """1 marks special tokens, 0 marks sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask

    def save_vocabulary( self , save_directory , filename_prefix ):
        vocab_file = os.path.join(save_directory , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(vocab_file , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def vocab_size( self ):
        return self.get_vocab_size(with_added_tokens=False )

    def _add_tokens( self , new_tokens , special_tokens = False ):
        # All ESM tokens are treated as special so the trie never splits them.
        return super()._add_tokens(new_tokens , special_tokens=True )
| 80 | 1 |
'''simple docstring'''
def _UpperCamelCase ( __A , __A = False ) -> str:
'''simple docstring'''
if not isinstance(__A , __A ):
UpperCamelCase__ = F'''Expected string as input, found {type(__A )}'''
raise ValueError(__A )
if not isinstance(__A , __A ):
UpperCamelCase__ = F'''Expected boolean as use_pascal parameter, found {type(__A )}'''
raise ValueError(__A )
UpperCamelCase__ = input_str.split("_" )
UpperCamelCase__ = 0 if use_pascal else 1
UpperCamelCase__ = words[start_index:]
UpperCamelCase__ = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCamelCase__ = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    from doctest import testmod
    testmod()
| 80 |
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin( theta , accuracy = 30 ):
    """Approximate sin(theta) with `accuracy` terms of the Maclaurin series.

    Renamed from a garbled identifier: the __main__ block below calls
    `maclaurin_sin`. Raises ValueError for non-numeric `theta` or a
    non-positive-int `accuracy`.
    """
    if not isinstance(theta , (int, float) ):
        raise ValueError("maclaurin_sin() requires either an int or float for theta" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
    theta = float(theta )
    # Reduce theta modulo 2*pi so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )
def maclaurin_cos( theta , accuracy = 30 ):
    """Approximate cos(theta) with `accuracy` terms of the Maclaurin series.

    Renamed from a garbled identifier: the __main__ block below calls
    `maclaurin_cos`. Raises ValueError for non-numeric `theta` or a
    non-positive-int `accuracy`.
    """
    if not isinstance(theta , (int, float) ):
        raise ValueError("maclaurin_cos() requires either an int or float for theta" )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
    theta = float(theta )
    # Reduce theta modulo 2*pi so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Spot-check the approximations at a few angles / accuracies.
    # NOTE(review): requires module-level `maclaurin_sin` / `maclaurin_cos`;
    # the functions above carry different (garbled) names — confirm.
    print(maclaurin_sin(1_0))
    print(maclaurin_sin(-1_0))
    print(maclaurin_sin(1_0, 1_5))
    print(maclaurin_sin(-1_0, 1_5))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(1_0, 1_5))
    print(maclaurin_cos(-1_0, 1_5))
| 80 | 1 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase_ :
    """CLIP processor whose image preprocessing is torchvision-based (differentiable).

    Garbled duplicate parameter names (SyntaxErrors) are restored; the
    `self.preprocess_img` call in `__call__` grounds the method rename.
    """

    def __init__( self , device = "cpu" , clip_model = "openai/clip-vit-large-patch14" ):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.4814_5466, 0.457_8275, 0.4082_1073]
        self.image_std = [0.2686_2954, 0.2613_0258, 0.2757_7711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(2_24 )
        self.center_crop = torchvision.transforms.CenterCrop(2_24 )

    def preprocess_img( self , images ):
        # Resize -> center-crop -> normalise, keeping autograd intact.
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images

    def __call__( self , text=None , images=None , **kwargs ):
        encoding = self.tokenizer(text=text , **kwargs )
        encoding["pixel_values"] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class lowercase_ ( nn.Module ):
    """CLIP-guided latent editing of a VQGAN image.

    Optimises a perturbation vector on the VQGAN latent so that the decoded
    image moves toward positive text prompts and away from negative ones.
    Garbled duplicate parameter names (SyntaxErrors in five methods) and the
    shadowed `__a` method names are restored from the bodies' own
    `self._add_vector` / `self._optimize_CLIP` / `self.process_prompts` calls.
    """

    def __init__( self , iterations=10 , lr=0.01 , vqgan=None , vqgan_config=None , vqgan_checkpoint=None , clip=None , clip_preprocessor=None , device=None , log=False , save_vector=True , return_val="image" , quantize=True , save_intermediate=False , show_intermediate=False , make_grid=False , ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation( self , input_path=None , output_path=None , total_duration=5 , extend_frames=True ):
        """Assemble the saved intermediate PNGs into an animated GIF."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*" ) )
        if not len(paths ):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)" )
        if len(paths ) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
        frame_duration = total_duration / len(paths )
        frame_durations = [frame_duration] * len(paths )
        if extend_frames:
            # Hold the first and last frames longer for readability.
            frame_durations[0] = 1.5
            frame_durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png" ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path , images , duration=frame_durations )
        print(f'''gif saved to {output_path}''' )

    def _get_latent( self , path=None , img=None ):
        """Encode an image file into the VQGAN latent space."""
        if not (path or img):
            raise ValueError("Input either path or tensor" )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) , target_image_size=2_56 ).to(self.device )
        img = preprocess_vqgan(img )
        z , *_ = self.vqgan.encode(img )
        return z

    def _add_vector( self , transform_vector ):
        """Decode the (optionally quantised) latent shifted by `transform_vector`."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q , *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )

    def _get_clip_similarity( self , prompts , image , weights=None ):
        """Sum of (optionally weighted) CLIP image-text similarity logits."""
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors="pt" , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss( self , pos_prompts , neg_prompts , image ):
        """Contrastive loss: attract to positive prompts, repel from negative ones."""
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"] , image , weights=(1 / pos_prompts["weights"]) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"] , image , weights=neg_prompts["weights"] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss

    def _optimize_CLIP( self , original_img , pos_prompts , neg_prompts ):
        """Yield progressively edited images (or vectors) over `self.iterations` steps."""
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
        optim = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
            print("CLIP loss" , clip_loss )
            if self.log:
                wandb.log({"CLIP Loss": clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector

    def _init_logging( self , positive_prompts , negative_prompts , image_path ):
        """Start a wandb run and record the prompts / original image."""
        wandb.init(reinit=True , project="face-editor" )
        wandb.config.update({"Positive Prompts": positive_prompts} )
        wandb.config.update({"Negative Prompts": negative_prompts} )
        wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((2_56, 2_56) )
            wandb.log("Original Image" , wandb.Image(image ) )

    def process_prompts( self , prompts ):
        """Normalise prompts ('a|b', 'a:2', (prompt, weight), ...) into prompts+weights."""
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split("|" )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(":" )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }

    def generate( self , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ):
        """Run the full CLIP-guided edit, optionally showing/saving intermediates."""
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print("Original Image" )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
| 80 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class lowercase_(AbstractDatasetInputStream):
    """Input stream that reads a SQL query or table into a :class:`Dataset`.

    Thin wrapper around the packaged ``Sql`` builder: ``__init__`` configures
    the builder, ``read`` runs download/prepare and returns the "train" split.

    NOTE(review): reconstructed from obfuscated code — the original subclassed
    the undefined name ``a__`` and had duplicate parameter names (SyntaxError).
    """

    def __init__(
        self,
        sql,
        con,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset produced by the SQL builder."""
        # The SQL builder needs no download configuration: everything is local.
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class lowercase_:
    """Writes a :class:`Dataset` to a SQL table with ``pandas.DataFrame.to_sql``.

    Batches are pulled from the dataset's Arrow table and appended to the
    target table, optionally in parallel with a multiprocessing pool.

    NOTE(review): reconstructed from obfuscated code — the original had
    duplicate parameter names and three methods all named ``__a`` that
    shadowed each other.
    """

    def __init__(
        self,
        dataset,
        name,
        con,
        batch_size=None,
        num_proc=None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name  # target SQL table name
        self.con = con  # connection / connectable passed to DataFrame.to_sql
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self):
        """Write the whole dataset and return the number of rows written."""
        # Strip arguments that must not be forwarded to DataFrame.to_sql:
        # ``sql``/``con`` are ours, ``index`` is forwarded explicitly.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one batch; returns the row count for that batch."""
        offset, index, to_sql_kwargs = args
        # Only the first batch may create/replace the table; later batches append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        # DataFrame.to_sql may return None; fall back to the frame length.
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs):
        """Iterate the dataset in batches (serially or via a pool) and write them."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
| 80 | 1 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def _UpperCamelCase(img, pts_src, pts_dst, rows, cols) -> np.ndarray:
    """Warp *img* with the affine transform that maps *pts_src* onto *pts_dst*.

    Args:
        img: input image array.
        pts_src: three source points, float32 array of shape (3, 2).
        pts_dst: three destination points, same shape.
        rows, cols: size of the output image.

    NOTE(review): the original had five parameters all named ``__A``
    (a SyntaxError); names reconstructed from the OpenCV call signature.
    """
    warp_matrix = cva.getAffineTransform(pts_src, pts_dst)
    return cva.warpAffine(img, warp_matrix, (rows, cols))


# The demo script below calls this helper as ``get_rotation``.
get_rotation = _UpperCamelCase
if __name__ == "__main__":
    # read original image (``cva`` is presumably OpenCV's cv2 — keep the
    # file's alias; TODO confirm against the import block)
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image (point correspondences for the
    # affine warps; np.float32 is required by cv2.getAffineTransform)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    # NOTE(review): the exact pts pairing per rotation was lost in the
    # obfuscation; this pairing follows the upstream demo — verify visually.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, img in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(img, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 80 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# torch.compile dynamo backend choices offered in the interactive config menu.
# Order matters: the menu index is mapped through this list by
# ``_convert_dynamo_backend`` below.
DYNAMO_BACKENDS = [
    'EAGER',
    'AOT_EAGER',
    'INDUCTOR',
    'NVFUSER',
    'AOT_NVFUSER',
    'AOT_CUDAGRAPHS',
    'OFI',
    'FX2TRT',
    'ONNXRT',
    'IPEX',
]
# Backward-compatible alias for the obfuscated original binding.
a__ = DYNAMO_BACKENDS
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    """Prompt on stdin until the answer is acceptable and return it.

    An empty answer returns ``default`` (when one is given).  Otherwise the
    raw string is passed through ``convert_value`` when provided.  A failed
    conversion prints ``error_message`` and re-prompts.

    NOTE(review): the original had four parameters all named ``__A``
    (a SyntaxError) and, with its siblings, was named ``_UpperCamelCase``
    eight times over — names reconstructed from usage.
    """
    while True:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)
def _ask_options(input_text, options=None, convert_value=None, default_choice=0):
    """Show a ``BulletMenu`` of *options* and return the (converted) choice.

    ``options`` defaults to an empty list via a ``None`` sentinel — the
    original used a mutable default argument, which is shared across calls.
    """
    menu = BulletMenu(input_text, [] if options is None else options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
    """Map a menu index (str/int) to a ``ComputeEnvironment`` enum member."""
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
    """Map a menu index (str/int) to a ``DistributedType`` enum member."""
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])
def _convert_dynamo_backend(value):
    """Map a menu index (str/int) to a ``DynamoBackend`` value string."""
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
    """Map a menu index (str/int) to a ``PrecisionType`` enum member."""
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
    """Map a menu index (str/int) to a ``SageMakerDistributedType`` member."""
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _UpperCamelCase ( __A ) -> Dict:
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class lowercase_(argparse.RawDescriptionHelpFormatter):
    """Help formatter that hides the ``<command> [<args>]`` placeholder.

    Overrides argparse's ``_format_usage`` hook — the obfuscated original
    named the method ``__a``, which argparse would never call.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 80 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.