| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 81 to 54k) | int64 (0 to 721) | string (length 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
"""Train an LSTM on a univariate price series and predict the next few days."""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[0]
    # If you're using some other dataset, change the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10    # days of history fed into the network per sample
    forward_days = 5  # days predicted per sample
    periods = 20      # number of look-back windows reserved for testing

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Slice the series into (look_back -> forward_days) windows
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64))  # input shape is inferred from the previous layer
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
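One step the script above omits: `MinMaxScaler` squashes prices into [0, 1] and the fitted scaler is discarded, so predictions stay in scaled units. A minimal follow-up sketch, assuming the scaler is kept in a `scaler` variable instead of being fitted inline (the names below are illustrative, not part of the original script):

scaler = MinMaxScaler()
actual_data = scaler.fit_transform(df.iloc[:, 1:2].values.reshape(len_data, 1))
# ... build and fit the model exactly as above, then map predictions back:
pred = model.predict(x_test)  # shape (n_samples, forward_days), scaled to [0, 1]
pred_prices = scaler.inverse_transform(pred.reshape(-1, 1)).reshape(pred.shape)
true_prices = scaler.inverse_transform(y_test.reshape(-1, 1)).reshape(y_test.shape)
print("mean absolute error:", np.abs(pred_prices - true_prices).mean())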
| 715
|
"""Utility to prepare Transformers version bumps for pre- and post-release."""
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
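A small, self-contained illustration of the default-version logic in `pre_release_work` and `post_release_work`, using only `packaging.version` (the version strings are illustrative, not taken from the file):

import packaging.version

v = packaging.version.parse("4.31.0.dev0")
assert v.is_devrelease
print(v.base_version)  # "4.31.0" -> proposed as the release version

released = packaging.version.parse("4.31.0")
print(f"{released.major}.{released.minor}.{released.micro + 1}")  # "4.31.1" (patch bump)
print(f"{released.major}.{released.minor + 1}.0")                 # "4.32.0" (minor bump)
print(f"{released.major}.{released.minor + 1}.0.dev0")            # next dev version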
| 50
| 0
|
"""Compute the Rayleigh quotient of a Hermitian matrix and a vector."""
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of Hermitian matrix a and vector v."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
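For context, the quantity computed by `rayleigh_quotient` is the standard Rayleigh quotient: for a Hermitian matrix $M$ and a nonzero vector $v$,

$$R(M, v) = \frac{v^{*} M v}{v^{*} v},$$

which is always real and satisfies $\lambda_{\min} \le R(M, v) \le \lambda_{\max}$, the extreme eigenvalues of $M$. In the second test case above the quotient evaluates to exactly 3 for the given real symmetric matrix and vector.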
| 716
|
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
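The criteria exercised above plug straight into `generate()`. A minimal usage sketch (not from the test file; "gpt2" is just a convenient small checkpoint):

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The quick brown fox", return_tensors="pt")
# Stop decoding after at most 2 seconds, or at max_length, whichever comes first.
outputs = model.generate(
    **inputs,
    max_length=50,
    stopping_criteria=StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)]),
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))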
| 50
| 0
|
import logging
import random

import ray

from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex


logger = logging.getLogger(__name__)


class RayRetriever:
    """Ray actor that wraps a RagRetriever so retrieval can run in a separate process."""

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 717
|
"""Dummy object raising an informative error when the note_seq backend is missing."""
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 50
| 0
|
"""Zero-shot object detection pipeline (PyTorch only)."""
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image"],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict of ints."""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
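A minimal usage sketch for this pipeline (not from the file above; the OWL-ViT checkpoint and COCO image URL are the publicly documented examples for this task):

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# Each prediction is {"score": ..., "label": ..., "box": {"xmin": ..., ...}}
for p in predictions:
    print(p["label"], round(p["score"], 3), p["box"])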
| 718
|
"""Convert a BigBird TensorFlow checkpoint to PyTorch."""
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
| 50
| 0
|
"""Change the contrast of an image with PIL."""
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Apply a contrast adjustment of the given level to every pixel."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Per-channel transformation applied to each pixel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
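For reference, the per-channel mapping implemented above is the common 8-bit contrast adjustment: with contrast level $L \in [-255, 255]$ and input value $c$,

$$F = \frac{259\,(L + 255)}{255\,(259 - L)}, \qquad c' = 128 + F\,(c - 128),$$

so positive levels stretch values away from mid-gray 128 and negative levels compress them toward it.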
| 719
|
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
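A minimal sketch of how such a constraint is used in practice (not from the test file; model, prompt, and word choices are illustrative): the generated text must contain one of several alternative word forms, and constrained beam search picks whichever fits best.

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Either surface form satisfies the constraint; the two token sequences must
# not be subsets of one another, or DisjunctiveConstraint raises ValueError.
word_ids = [
    tokenizer(" sunny", add_special_tokens=False).input_ids,
    tokenizer(" rainy", add_special_tokens=False).input_ids,
]
constraint = DisjunctiveConstraint(word_ids)

inputs = tokenizer("The weather today is", return_tensors="pt")
outputs = model.generate(**inputs, constraints=[constraint], num_beams=4, max_length=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))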
| 50
| 0
|
"""Convert the original OpenAI Jukebox checkpoints to the Transformers format."""
import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Copy/paste/tweak the original weights into the Transformers Jukebox structure."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 720
|
"""`transformers-cli download`: fetch a model and tokenizer into the local cache."""
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
| 50
| 0
|
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 721
|
def excel_title_to_column(column_title: str) -> int:
    """
    Given a string column_title that represents the column title in an
    Excel sheet, return its corresponding column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("B")
    2
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("Z")
    26
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
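The loop is a bijective base-26 conversion: each letter contributes its alphabet position (A = 1, ..., Z = 26) times a power of 26 determined by its distance from the right,

$$N = \sum_{k=0}^{n-1} d_k \cdot 26^{k}, \qquad d_k \in \{1, \dots, 26\},$$

so, for example, "AB" gives $1 \cdot 26 + 2 = 28$.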
| 50
| 0
|
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """
    Takes a list of possible side lengths and determines whether a
    two-dimensional polygon with such side lengths can exist: the largest
    side must be strictly less than the sum of the others.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
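A quick sanity check of `check_polygon` on a degenerate and a valid triangle (values chosen for illustration):

print(check_polygon([1, 2, 3]))  # False: 3 is not strictly less than 1 + 2
print(check_polygon([3, 4, 5]))  # True: 5 < 3 + 4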
| 700
|
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 50
| 0
|
""" GLUE benchmark metric. """

from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def lowerCamelCase__ ( A : List[str] , A : Dict ):
'''simple docstring'''
return float((preds == labels).mean() )
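# (preds == labels) is an elementwise boolean array, so its mean is the fraction of
# exact matches, i.e. plain classification accuracy.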
def lowerCamelCase__ ( A : List[Any] , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase = float(fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCamelCase__ ( A : List[Any] , A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = float(pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0] )
UpperCAmelCase = float(spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
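# Pearson measures linear correlation and Spearman rank correlation; these are the
# standard metrics for the STS-B regression subset, whose labels are floats.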
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
def a__( self : int )-> Union[str, Any]:
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def a__( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : str )-> Tuple:
"""simple docstring"""
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__lowerCamelCase , __lowerCamelCase )}
elif self.config_name == "stsb":
return pearson_and_spearman(__lowerCamelCase , __lowerCamelCase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__lowerCamelCase , __lowerCamelCase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__lowerCamelCase , __lowerCamelCase )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 701
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = """▁"""
_lowercase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowercase : Any = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowercase : int = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
_lowercase : int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
__magic_name__ : List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
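# Example: for mBART-50 the prefix is a single language code and the suffix a single
# eos token, so n regular tokens yield the mask [1] + [0] * n + [1].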
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
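# The resulting mBART-50 layout is `[src_lang_code] X [eos]`; a (rarely used) pair
# simply becomes `[src_lang_code] A B [eos]`.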
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
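# Minimal usage sketch (upstream MBart50Tokenizer names assumed, not the obfuscated
# names above):
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   model_inputs = tokenizer("UN Chief says there is no military solution", return_tensors="pt")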
| 50
| 0
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( UpperCAmelCase_ ):
__magic_name__ : Tuple = "ClapFeatureExtractor"
__magic_name__ : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
super().__init__(_lowercase , _lowercase )
def __call__( self : Optional[int] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = kwargs.pop('''sampling_rate''' , _lowercase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if audios is not None:
UpperCAmelCase = self.feature_extractor(
_lowercase , sampling_rate=_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None and audios is not None:
UpperCAmelCase = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )
def a__( self : List[Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[int] )-> List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def a__( self : Tuple )-> str:
"""simple docstring"""
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
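# Minimal usage sketch (upstream ClapProcessor names assumed; the processor simply
# merges tokenizer output and audio `input_features` into one BatchEncoding):
#   processor = ClapProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
#   inputs = processor(text=["a dog barking"], audios=waveform, sampling_rate=48000, return_tensors="pt")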
| 702
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowercase : Union[str, Any] = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
_lowercase : Any = TypeVar("""T""")
_lowercase : Optional[Any] = TypeVar("""U""")
class UpperCamelCase__( Generic[T, U] ):
def __init__( self : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = key
UpperCAmelCase = val
UpperCAmelCase = None
UpperCAmelCase = None
def __repr__( self : List[str] )-> Tuple:
"""simple docstring"""
return (
F"""Node: key: {self.key}, val: {self.val}, """
F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class UpperCamelCase__( Generic[T, U] ):
def __init__( self : int )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = DoubleLinkedListNode(lowercase_ , lowercase_ )
UpperCAmelCase = DoubleLinkedListNode(lowercase_ , lowercase_ )
UpperCAmelCase = self.rear, self.head
def __repr__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = ["DoubleLinkedList"]
UpperCAmelCase = self.head
while node.next is not None:
rep.append(str(lowercase_ ) )
UpperCAmelCase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowercase_ )
def a__( self : Union[str, Any] , lowerCAmelCase : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
UpperCAmelCase = node
UpperCAmelCase = previous
UpperCAmelCase = node
UpperCAmelCase = self.rear
def a__( self : Optional[int] , lowerCAmelCase : Any )-> List[Any]:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
UpperCAmelCase = node.next
UpperCAmelCase = node.prev
UpperCAmelCase = None
UpperCAmelCase = None
return node
class UpperCamelCase__( Generic[T, U] ):
__magic_name__ : str = {}
def __init__( self : Optional[Any] , lowerCAmelCase : List[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = DoubleLinkedList()
UpperCAmelCase = capacity
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = {}
def __repr__( self : List[str] )-> int:
"""simple docstring"""
return (
F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
F"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self : Union[str, Any] , lowerCAmelCase : List[Any] )-> Any:
"""simple docstring"""
return key in self.cache
def a__( self : Tuple , lowerCAmelCase : Optional[int] )-> str:
"""simple docstring"""
if key in self.cache:
self.hits += 1
UpperCAmelCase = self.cache[key]
UpperCAmelCase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase_ )
return node.val
self.miss += 1
return None
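# Invariant: the rear of the linked list holds the most recently used entry, so a hit
# re-inserts the node at the rear, while eviction (in put) removes head.next, the LRU.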
def a__( self : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
UpperCAmelCase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase_ ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
UpperCAmelCase = DoubleLinkedListNode(lowercase_ , lowercase_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
UpperCAmelCase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
UpperCAmelCase = value
self.list.add(lowercase_ )
@classmethod
def a__( cls : Dict , lowerCAmelCase : List[Any] = 128 )-> Optional[int]:
"""simple docstring"""
def cache_decorator_inner(lowerCAmelCase : Optional[int] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCAmelCase : Optional[int] ) -> U:
if func not in cls.decorator_function_to_instance_map:
UpperCAmelCase = LRUCache(lowercase_ )
UpperCAmelCase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
UpperCAmelCase = func(*lowercase_ )
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase_ , '''cache_info''' , lowercase_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
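# Usage sketch (upstream this classmethod is named `decorator`; the cache is keyed on
# the first positional argument):
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return num if num < 2 else fib(num - 1) + fib(num - 2)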
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
'''simple docstring'''
import functools
def lowerCamelCase__ ( A : list[int] , A : list[int] ):
'''simple docstring'''
if not isinstance(A , A ) or not all(isinstance(A , A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(A ) != 3 or not all(isinstance(A , A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(A ) == 0:
return 0
if min(A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(A ) >= 3_66:
raise ValueError('''All days elements should be less than 366''' )
UpperCAmelCase = set(A )
@functools.cache
def dynamic_programming(A : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
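# Worked example: days = [1, 4, 6, 7, 8, 20], costs = [2, 7, 15] -> buy 1-day passes on
# days 1 and 20 plus a 7-day pass covering days 4-10, for a total of 2 + 7 + 2 = 11.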
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class UpperCamelCase__( _UpperCAmelCase ):
__magic_name__ : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Any = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
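# _LazyModule defers the torch-backed imports above until an attribute is actually
# accessed, so importing the package stays cheap when the models are never used.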
| 50
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Dict = logging.get_logger(__name__)
_lowercase : List[Any] = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class UpperCamelCase__( __lowercase ):
__magic_name__ : List[Any] = "distilbert"
__magic_name__ : Optional[Any] = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self : Tuple , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : int=6 , lowerCAmelCase : Any=12 , lowerCAmelCase : Optional[Any]=768 , lowerCAmelCase : Optional[int]=4 * 768 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : int=0.1 , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[str]=0.2 , lowerCAmelCase : List[Any]=0 , **lowerCAmelCase : List[Any] , )-> Tuple:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = sinusoidal_pos_embds
UpperCAmelCase = n_layers
UpperCAmelCase = n_heads
UpperCAmelCase = dim
UpperCAmelCase = hidden_dim
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation
UpperCAmelCase = initializer_range
UpperCAmelCase = qa_dropout
UpperCAmelCase = seq_classif_dropout
super().__init__(**lowerCAmelCase , pad_token_id=lowerCAmelCase )
class UpperCamelCase__( __lowercase ):
@property
def a__( self : Optional[int] )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
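# The dynamic-axis mapping tells the ONNX exporter which input dimensions may vary at
# runtime: batch size and sequence length always, plus the choice dimension for
# multiple-choice heads.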
| 705
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple=1 ):
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowerCamelCase__ ( A : int , A : Optional[Any]=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item.replace('''in_layers.0''' , '''norm1''' )
UpperCAmelCase = new_item.replace('''in_layers.2''' , '''conv1''' )
UpperCAmelCase = new_item.replace('''out_layers.0''' , '''norm2''' )
UpperCAmelCase = new_item.replace('''out_layers.3''' , '''conv2''' )
UpperCAmelCase = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
UpperCAmelCase = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Any , A : int=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item
UpperCAmelCase = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
UpperCAmelCase = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
UpperCAmelCase = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
UpperCAmelCase = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Tuple , A : Union[str, Any] , A : int , A : Dict=None , A : Optional[int]=None , A : Optional[Any]=None ):
'''simple docstring'''
assert isinstance(A , A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase = old_checkpoint[path]
UpperCAmelCase = old_tensor.shape[0] // 3
UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase = old_tensor.shape[0] // config['''num_head_channels'''] // 3
UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase = query.reshape(A )
UpperCAmelCase = key.reshape(A )
UpperCAmelCase = value.reshape(A )
for path in paths:
UpperCAmelCase = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
UpperCAmelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
UpperCAmelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCAmelCase = old_checkpoint[path['''old''']]
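# Note on the proj_attn branch above: a 1x1 Conv1d weight has shape (out, in, 1), so
# slicing [:, :, 0] produces the (out, in) matrix that an nn.Linear layer expects.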
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = checkpoint['''time_embed.0.weight''']
UpperCAmelCase = checkpoint['''time_embed.0.bias''']
UpperCAmelCase = checkpoint['''time_embed.2.weight''']
UpperCAmelCase = checkpoint['''time_embed.2.bias''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase = checkpoint['''out.0.weight''']
UpperCAmelCase = checkpoint['''out.0.bias''']
UpperCAmelCase = checkpoint['''out.2.weight''']
UpperCAmelCase = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
for i in range(1 , A ):
UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path, resnet_op] , config=A )
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=A , config=A , )
UpperCAmelCase = middle_blocks[0]
UpperCAmelCase = middle_blocks[1]
UpperCAmelCase = middle_blocks[2]
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
A , A , A , attention_paths_to_split=A , config=A )
for i in range(A ):
UpperCAmelCase = i // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = i % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [shave_segments(A , 2 ) for name in output_blocks[i]]
UpperCAmelCase = {}
for layer in output_block_layers:
UpperCAmelCase , UpperCAmelCase = layer.split('''.''' )[0], shave_segments(A , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A )
else:
UpperCAmelCase = [layer_name]
if len(A ) > 1:
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(A , A , A , additional_replacements=[meta_path] , config=A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(A ) == 2:
UpperCAmelCase = []
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=A , )
else:
UpperCAmelCase = renew_resnet_paths(A , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase = '''.'''.join(['''output_blocks''', str(A ), path['''old''']] )
UpperCAmelCase = '''.'''.join(['''up_blocks''', str(A ), '''resnets''', str(A ), path['''new''']] )
UpperCAmelCase = checkpoint[old_path]
return new_checkpoint
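# Overall key mapping: input_blocks -> down_blocks, middle_block -> mid_block,
# output_blocks -> up_blocks, with fused qkv tensors split into separate
# query/key/value projections along the way.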
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowercase : Dict = parser.parse_args()
_lowercase : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowercase : List[str] = json.loads(f.read())
_lowercase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowercase : Any = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowercase : Tuple = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 50
| 0
|
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_lowercase : str = NewType("""DataClass""", Any)
_lowercase : int = NewType("""DataClassType""", Any)
def lowerCamelCase__ ( A : Tuple ):
'''simple docstring'''
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def lowerCamelCase__ ( A : list ):
'''simple docstring'''
UpperCAmelCase = {str(_SCREAMING_SNAKE_CASE ): choice for choice in choices}
return lambda A : str_to_choice.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( *,
A : Union[str, List[str]] = None , A : str = None , A : Any = dataclasses.MISSING , A : Callable[[], Any] = dataclasses.MISSING , A : dict = None , **A : Union[str, Any] , ):
'''simple docstring'''
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
UpperCAmelCase = {}
if aliases is not None:
UpperCAmelCase = aliases
if help is not None:
UpperCAmelCase = help
return dataclasses.field(metadata=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , default_factory=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
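# Usage sketch (upstream this helper is exposed as HfArg; the field name below is
# hypothetical):
#   learning_rate: float = HfArg(default=5e-5, aliases=["--lr"], help="initial learning rate")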
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Any = 42
def __init__( self : Dict , lowerCAmelCase : Union[DataClassType, Iterable[DataClassType]] , **lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
if "formatter_class" not in kwargs:
UpperCAmelCase = ArgumentDefaultsHelpFormatter
super().__init__(**__UpperCamelCase )
if dataclasses.is_dataclass(__UpperCamelCase ):
UpperCAmelCase = [dataclass_types]
UpperCAmelCase = list(__UpperCamelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(__UpperCamelCase )
@staticmethod
def a__( lowerCAmelCase : ArgumentParser , lowerCAmelCase : dataclasses.Field )-> List[str]:
"""simple docstring"""
UpperCAmelCase = F"""--{field.name}"""
UpperCAmelCase = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , __UpperCamelCase ):
raise RuntimeError(
'''Unresolved type detected, which should have been done with the help of '''
'''`typing.get_type_hints` method by default''' )
UpperCAmelCase = kwargs.pop('''aliases''' , [] )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase = [aliases]
UpperCAmelCase = getattr(field.type , '''__origin__''' , field.type )
if origin_type is Union or (hasattr(__UpperCamelCase , '''UnionType''' ) and isinstance(__UpperCamelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(__UpperCamelCase ) not in field.type.__args__
):
raise ValueError(
'''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'''
''' the argument parser only supports one type per argument.'''
F""" Problem encountered in field \'{field.name}\'.""" )
if type(__UpperCamelCase ) not in field.type.__args__:
# filter `str` in Union
UpperCAmelCase = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
UpperCAmelCase = getattr(field.type , '''__origin__''' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
UpperCAmelCase = (
field.type.__args__[0] if isinstance(__UpperCamelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
UpperCAmelCase = getattr(field.type , '''__origin__''' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
UpperCAmelCase = {}
if origin_type is Literal or (isinstance(field.type , __UpperCamelCase ) and issubclass(field.type , __UpperCamelCase )):
if origin_type is Literal:
UpperCAmelCase = field.type.__args__
else:
UpperCAmelCase = [x.value for x in field.type]
UpperCAmelCase = make_choice_type_function(kwargs['''choices'''] )
if field.default is not dataclasses.MISSING:
UpperCAmelCase = field.default
else:
UpperCAmelCase = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the current kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
UpperCAmelCase = copy(__UpperCamelCase )
# Hack because type=bool in argparse does not behave as we want.
UpperCAmelCase = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
UpperCAmelCase = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
UpperCAmelCase = default
# This tells argparse we accept 0 or 1 value after --field_name
UpperCAmelCase = '''?'''
# This is the value that will get picked if we do --field_name (without value)
UpperCAmelCase = True
elif isclass(__UpperCamelCase ) and issubclass(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase = field.type.__args__[0]
UpperCAmelCase = '''+'''
if field.default_factory is not dataclasses.MISSING:
UpperCAmelCase = field.default_factory()
elif field.default is dataclasses.MISSING:
UpperCAmelCase = True
else:
UpperCAmelCase = field.type
if field.default is not dataclasses.MISSING:
UpperCAmelCase = field.default
elif field.default_factory is not dataclasses.MISSING:
UpperCAmelCase = field.default_factory()
else:
UpperCAmelCase = True
parser.add_argument(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
UpperCAmelCase = False
parser.add_argument(F"""--no_{field.name}""" , action='''store_false''' , dest=field.name , **__UpperCamelCase )
def a__( self : Optional[Any] , lowerCAmelCase : DataClassType )-> List[str]:
"""simple docstring"""
if hasattr(__UpperCamelCase , '''_argument_group_name''' ):
UpperCAmelCase = self.add_argument_group(dtype._argument_group_name )
else:
UpperCAmelCase = self
try:
UpperCAmelCase = get_type_hints(__UpperCamelCase )
except NameError:
raise RuntimeError(
F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
'''removing the line `from __future__ import annotations`, which opts in to Postponed '''
'''Evaluation of Annotations (PEP 563)''' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(__UpperCamelCase ):
UpperCAmelCase = '''.'''.join(map(__UpperCamelCase , sys.version_info[:3] ) )
raise RuntimeError(
F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
'''the line `from __future__ import annotations`, which opts in to union types written as '''
'''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '''
'''support Python versions lower than 3.10, you need to use '''
'''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '''
'''`X | None`.''' ) from ex
raise
for field in dataclasses.fields(__UpperCamelCase ):
if not field.init:
continue
UpperCAmelCase = type_hints[field.name]
self._parse_dataclass_field(__UpperCamelCase , __UpperCamelCase )
def a__( self : Dict , lowerCAmelCase : Tuple=None , lowerCAmelCase : int=False , lowerCAmelCase : int=True , lowerCAmelCase : List[str]=None , lowerCAmelCase : int=None , )-> Tuple[DataClass, ...]:
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
UpperCAmelCase = []
if args_filename:
args_files.append(Path(__UpperCamelCase ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
UpperCAmelCase = ArgumentParser()
args_file_parser.add_argument(__UpperCamelCase , type=__UpperCamelCase , action='''append''' )
# Use only remaining args for further parsing (remove the args_file_flag)
UpperCAmelCase , UpperCAmelCase = args_file_parser.parse_known_args(args=__UpperCamelCase )
UpperCAmelCase = vars(__UpperCamelCase ).get(args_file_flag.lstrip('''-''' ) , __UpperCamelCase )
if cmd_args_file_paths:
args_files.extend([Path(__UpperCamelCase ) for p in cmd_args_file_paths] )
UpperCAmelCase = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
UpperCAmelCase = file_args + args if args is not None else file_args + sys.argv[1:]
UpperCAmelCase , UpperCAmelCase = self.parse_known_args(args=__UpperCamelCase )
UpperCAmelCase = []
for dtype in self.dataclass_types:
UpperCAmelCase = {f.name for f in dataclasses.fields(__UpperCamelCase ) if f.init}
UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k in keys}
for k in keys:
delattr(__UpperCamelCase , __UpperCamelCase )
UpperCAmelCase = dtype(**__UpperCamelCase )
outputs.append(__UpperCamelCase )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__UpperCamelCase )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
return (*outputs,)
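# Precedence, lowest to highest: dataclass defaults, default .args files, files given
# via the args-file flag, then explicit command-line arguments (argparse keeps the
# last occurrence of a repeated argument).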
def a__( self : Any , lowerCAmelCase : Dict[str, Any] , lowerCAmelCase : bool = False )-> Tuple[DataClass, ...]:
"""simple docstring"""
UpperCAmelCase = set(args.keys() )
UpperCAmelCase = []
for dtype in self.dataclass_types:
UpperCAmelCase = {f.name for f in dataclasses.fields(__UpperCamelCase ) if f.init}
UpperCAmelCase = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
UpperCAmelCase = dtype(**__UpperCamelCase )
outputs.append(__UpperCamelCase )
if not allow_extra_keys and unused_keys:
raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(__UpperCamelCase )}""" )
return tuple(__UpperCamelCase )
def a__( self : Any , lowerCAmelCase : str , lowerCAmelCase : bool = False )-> Tuple[DataClass, ...]:
"""simple docstring"""
with open(Path(__UpperCamelCase ) , encoding='''utf-8''' ) as open_json_file:
UpperCAmelCase = json.loads(open_json_file.read() )
UpperCAmelCase = self.parse_dict(__UpperCamelCase , allow_extra_keys=__UpperCamelCase )
return tuple(__UpperCamelCase )
def a__( self : Any , lowerCAmelCase : str , lowerCAmelCase : bool = False )-> Tuple[DataClass, ...]:
"""simple docstring"""
UpperCAmelCase = self.parse_dict(yaml.safe_load(Path(__UpperCamelCase ).read_text() ) , allow_extra_keys=__UpperCamelCase )
return tuple(__UpperCamelCase )
| 706
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = "dpr"
def __init__( self : Dict , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Tuple="absolute" , lowerCAmelCase : int = 0 , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = projection_dim
UpperCAmelCase = position_embedding_type
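# projection_dim = 0 (the default) means no extra projection layer is added on top of
# the encoders' pooled output.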
| 50
| 0
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCamelCase__( nn.Module ):
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : jnp.dtype = jnp.floataa
def a__( self : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
for i in range(self.num_layers ):
UpperCAmelCase = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase = FlaxResnetBlockaD(
in_channels=lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase )
UpperCAmelCase = resnets
UpperCAmelCase = attentions
if self.add_downsample:
UpperCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Dict=True )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = ()
for resnet, attn in zip(self.resnets , self.attentions ):
UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
UpperCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase = self.downsamplers_a(lowerCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase__( nn.Module ):
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : jnp.dtype = jnp.floataa
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = []
for i in range(self.num_layers ):
UpperCAmelCase = self.in_channels if i == 0 else self.out_channels
UpperCAmelCase = FlaxResnetBlockaD(
in_channels=lowerCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase = resnets
if self.add_downsample:
UpperCAmelCase = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any]=True )-> str:
"""simple docstring"""
UpperCAmelCase = ()
for resnet in self.resnets:
UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
output_states += (hidden_states,)
if self.add_downsample:
UpperCAmelCase = self.downsamplers_a(lowerCAmelCase )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase__( nn.Module ):
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : jnp.dtype = jnp.floataa
def a__( self : List[str] )-> str:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = []
for i in range(self.num_layers ):
UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase )
UpperCAmelCase = resnets
UpperCAmelCase = attentions
if self.add_upsample:
UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Dict , lowerCAmelCase : Any=True )-> Tuple:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
UpperCAmelCase = res_hidden_states_tuple[-1]
UpperCAmelCase = res_hidden_states_tuple[:-1]
UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
UpperCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
if self.add_upsample:
UpperCAmelCase = self.upsamplers_a(lowerCAmelCase )
return hidden_states
class UpperCamelCase__( nn.Module ):
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : bool = True
__magic_name__ : jnp.dtype = jnp.floataa
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = []
for i in range(self.num_layers ):
UpperCAmelCase = self.in_channels if (i == self.num_layers - 1) else self.out_channels
UpperCAmelCase = self.prev_output_channel if i == 0 else self.out_channels
UpperCAmelCase = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase = resnets
if self.add_upsample:
UpperCAmelCase = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple=True )-> List[Any]:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
UpperCAmelCase = res_hidden_states_tuple[-1]
UpperCAmelCase = res_hidden_states_tuple[:-1]
UpperCAmelCase = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
if self.add_upsample:
UpperCAmelCase = self.upsamplers_a(lowerCAmelCase )
return hidden_states
class UpperCamelCase__( nn.Module ):
__magic_name__ : int
__magic_name__ : float = 0.0
__magic_name__ : int = 1
__magic_name__ : int = 1
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : jnp.dtype = jnp.floataa
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
UpperCAmelCase = []
for _ in range(self.num_layers ):
UpperCAmelCase = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowerCAmelCase )
UpperCAmelCase = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowerCAmelCase )
UpperCAmelCase = resnets
UpperCAmelCase = attentions
def __call__( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : int=True )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.resnets[0](lowerCAmelCase , lowerCAmelCase )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
UpperCAmelCase = attn(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
UpperCAmelCase = resnet(lowerCAmelCase , lowerCAmelCase , deterministic=lowerCAmelCase )
return hidden_states
| 707
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : Tuple = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCamelCase__ ( A : Any , A : str ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
elif args.student_type == "gpt2":
UpperCAmelCase = False
def lowerCamelCase__ ( A : List[Any] , A : List[str] ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=A , required=A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=A , required=A , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=A , help='''The student type (DistilBERT, RoBERTa, GPT-2).''' , )
parser.add_argument('''--student_config''' , type=A , required=A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=A , type=A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=A , help='''Teacher type (BERT, RoBERTa, GPT-2).''' )
parser.add_argument('''--teacher_name''' , type=A , required=A , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=A , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is set, MLM is used instead of CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=A , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=A , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=A , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=A , default=40_00 , help='''Checkpoint interval.''' )
UpperCAmelCase = parser.parse_args()
sanity_checks(A )
# ARGS #
init_gpu_params(A )
set_seed(A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(A ) , A , indent=4 )
git_log(args.dump_path )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.student_type]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
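# build a name -> vocabulary-id map for the tokenizer's special tokens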
UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase = tokenizer.all_special_tokens.index(A )
UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase = special_tok_ids
UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
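# turn raw counts into masking weights: the negative exponent upweights rare tokens
# (word2vec-style smoothing), and special tokens are excluded below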
UpperCAmelCase = np.maximum(A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase = 0.0 # do not predict special tokens
UpperCAmelCase = torch.from_numpy(A )
else:
UpperCAmelCase = None
UpperCAmelCase = LmSeqsDataset(params=A , data=A )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=A )
else:
UpperCAmelCase = student_model_class(A )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A , A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A , A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase = Distiller(
params=A , dataset=A , token_probs=A , student=A , teacher=A )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
| 50
| 0
|
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class UpperCamelCase__( __A , unittest.TestCase ):
__magic_name__ : List[Any] = RoFormerTokenizer
__magic_name__ : List[Any] = RoFormerTokenizerFast
__magic_name__ : Optional[int] = True
__magic_name__ : Union[str, Any] = True
def a__( self : Dict )-> Tuple:
"""simple docstring"""
super().setUp()
def a__( self : Union[str, Any] , **lowerCAmelCase : Any )-> Any:
"""simple docstring"""
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **UpperCamelCase__ )
def a__( self : Any , **lowerCAmelCase : str )-> Union[str, Any]:
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **UpperCamelCase__ )
def a__( self : str )-> Dict:
"""simple docstring"""
UpperCAmelCase = '永和服装饰品有限公司,今天天气非常好'
UpperCAmelCase = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'
return input_text, output_text
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_chinese_input_output_texts()
UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , output_text.split() )
UpperCAmelCase = tokens + [tokenizer.unk_token]
UpperCAmelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = self.get_chinese_input_output_texts()
UpperCAmelCase = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , output_text.split() )
UpperCAmelCase = tokens + [tokenizer.unk_token]
UpperCAmelCase = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
def a__( self : Optional[int] )-> Any:
"""simple docstring"""
pass
def a__( self : Tuple )-> Optional[int]:
"""simple docstring"""
pass
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
| 708
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
| 50
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( a__ ):
__magic_name__ : Dict = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ChineseCLIPImageProcessor"
__magic_name__ : Union[str, Any] = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Optional[int] , lowerCAmelCase : Dict=None , lowerCAmelCase : Dict=None , **lowerCAmelCase : Dict )-> Any:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCamelCase_ , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase = self.image_processor
def __call__( self : Tuple , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : int )-> Any:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ) , tensor_type=lowerCamelCase_ )
def a__( self : Optional[int] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Union[str, Any] )-> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ )
def a__( self : Optional[Any] , *lowerCAmelCase : int , **lowerCAmelCase : List[Any] )-> Any:
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ )
@property
def a__( self : Dict )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def a__( self : int )-> List[str]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCamelCase_ , )
return self.image_processor_class
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 50
| 0
|
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowercase : Optional[int] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
_lowercase : Optional[int] = (
subprocess.check_output(F"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode("""utf-8""").split()
)
_lowercase : List[Any] = """|""".join(sys.argv[1:])
_lowercase : Dict = re.compile(rF"""^({joined_dirs}).*?\.py$""")
_lowercase : List[Any] = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 710
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=A , default='''logs''' , help='''Where to store experiment tracking logs and relevant project information.''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 50
| 0
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
_lowercase : str = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def lowerCamelCase__ ( A : int=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__UpperCAmelCase ) )
class UpperCamelCase__( __UpperCAmelCase ):
__magic_name__ : str = None
__magic_name__ : str = None
def a__( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : int )-> Dict:
"""simple docstring"""
with TemporaryDirectory() as tmp_dir:
UpperCAmelCase = dataset_module_factory(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
UpperCAmelCase = import_main_class(dataset_module.module_path , dataset=lowerCAmelCase_ )
UpperCAmelCase = builder_cls(
cache_dir=lowerCAmelCase_ , config_name=lowerCAmelCase_ , hash=dataset_module.hash , )
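# rebuild the dataset_info.json URL as it is laid out on the HF GCP bucket
# and check that it can actually be downloaded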
UpperCAmelCase = '''/'''.join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowerCAmelCase_ ).replace(os.sep , '''/''' ),
config.DATASET_INFO_FILENAME,
] )
UpperCAmelCase = cached_path(lowerCAmelCase_ , cache_dir=lowerCAmelCase_ )
self.assertTrue(os.path.exists(lowerCAmelCase_ ) )
@pytest.mark.integration
def lowerCamelCase__ ( A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
UpperCAmelCase = dataset_module_factory('''wikipedia''' , cache_dir=__lowerCAmelCase )
UpperCAmelCase = import_main_class(dataset_module.module_path )
UpperCAmelCase = builder_cls(
cache_dir=__lowerCAmelCase , config_name='''20220301.frr''' , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
UpperCAmelCase = None
builder_instance.download_and_prepare()
UpperCAmelCase = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def lowerCamelCase__ ( A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = dataset_module_factory('''wikipedia''' , cache_dir=__lowerCAmelCase )
UpperCAmelCase = import_main_class(dataset_module.module_path , dataset=__lowerCAmelCase )
UpperCAmelCase = builder_cls(
cache_dir=__lowerCAmelCase , config_name='''20220301.frr''' , hash=dataset_module.hash , )
UpperCAmelCase = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert "train" in ds
assert isinstance(ds['''train'''] , __lowerCAmelCase )
assert next(iter(ds['''train'''] ) )
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
_lowercase : List[str] = True
except (ImportError, ModuleNotFoundError):
_lowercase : Tuple = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
UpperCAmelCase = re.sub('''<n>''' , '''''' , A )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCAmelCase ) )
| 712
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 50
| 0
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowercase : int = logging.get_logger(__name__)
@add_end_docstrings(_a )
class lowerCamelCase_( _a ):
def __init__( self : str , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Any )-> List[str]:
"""simple docstring"""
super().__init__(*_A , **_A )
requires_backends(self , '''decord''' )
self.check_model_type(_A )
def a__( self : int , lowerCAmelCase : int=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=None )-> Dict:
"""simple docstring"""
UpperCAmelCase = {}
if frame_sampling_rate is not None:
UpperCAmelCase = frame_sampling_rate
if num_frames is not None:
UpperCAmelCase = num_frames
UpperCAmelCase = {}
if top_k is not None:
UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[Any] , lowerCAmelCase : List[Any] , **lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
return super().__call__(_A , **_A )
def a__( self : Any , lowerCAmelCase : Dict , lowerCAmelCase : str=None , lowerCAmelCase : Dict=1 )-> Any:
"""simple docstring"""
if num_frames is None:
UpperCAmelCase = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
UpperCAmelCase = BytesIO(requests.get(_A ).content )
UpperCAmelCase = VideoReader(_A )
videoreader.seek(0 )
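# sample num_frames indices evenly spaced over a window of
# num_frames * frame_sampling_rate frames, starting from the first frame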
UpperCAmelCase = 0
UpperCAmelCase = num_frames * frame_sampling_rate - 1
UpperCAmelCase = np.linspace(_A , _A , num=_A , dtype=np.intaa )
UpperCAmelCase = videoreader.get_batch(_A ).asnumpy()
UpperCAmelCase = list(_A )
UpperCAmelCase = self.image_processor(_A , return_tensors=self.framework )
return model_inputs
def a__( self : Tuple , lowerCAmelCase : Optional[int] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model(**_A )
return model_outputs
def a__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]=5 )-> Tuple:
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
UpperCAmelCase , UpperCAmelCase = probs.topk(_A )
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
UpperCAmelCase = scores.tolist()
UpperCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_A , _A )]
| 713
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Any = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
import torch
from torch import nn
class UpperCamelCase__( nn.Module ):
def __init__( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any]=1 , lowerCAmelCase : Any=False )-> Dict:
"""simple docstring"""
super().__init__()
UpperCAmelCase = n_token
UpperCAmelCase = d_embed
UpperCAmelCase = d_proj
UpperCAmelCase = cutoffs + [n_token]
UpperCAmelCase = [0] + self.cutoffs
UpperCAmelCase = div_val
UpperCAmelCase = self.cutoffs[0]
UpperCAmelCase = len(self.cutoffs ) - 1
UpperCAmelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCAmelCase = nn.ModuleList()
UpperCAmelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(__UpperCamelCase , __UpperCamelCase ) ) )
else:
self.out_projs.append(__UpperCamelCase )
self.out_layers.append(nn.Linear(__UpperCamelCase , __UpperCamelCase ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCAmelCase , UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(__UpperCamelCase , __UpperCamelCase ) ) )
self.out_layers.append(nn.Linear(__UpperCamelCase , r_idx - l_idx ) )
UpperCAmelCase = keep_order
def a__( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] )-> Optional[int]:
"""simple docstring"""
if proj is None:
UpperCAmelCase = nn.functional.linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCAmelCase = nn.functional.linear(__UpperCamelCase , proj.t().contiguous() )
UpperCAmelCase = nn.functional.linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def a__( self : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str]=None , lowerCAmelCase : Union[str, Any]=False )-> Any:
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
UpperCAmelCase = hidden[..., :-1, :].contiguous()
UpperCAmelCase = labels[..., 1:].contiguous()
UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCAmelCase = self._compute_logit(__UpperCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCAmelCase = labels != -100
UpperCAmelCase = torch.zeros_like(__UpperCamelCase , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase = (
-nn.functional.log_softmax(__UpperCamelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCAmelCase = nn.functional.log_softmax(__UpperCamelCase , dim=-1 )
else:
# construct weights and biases
UpperCAmelCase , UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase , UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase = self.out_layers[i].weight
UpperCAmelCase = self.out_layers[i].bias
if i == 0:
UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(__UpperCamelCase )
biases.append(__UpperCamelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase = self._compute_logit(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase = nn.functional.log_softmax(__UpperCamelCase , dim=1 )
if labels is None:
UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCAmelCase = torch.zeros_like(__UpperCamelCase , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase = 0
UpperCAmelCase = [0] + self.cutoffs
for i in range(len(__UpperCamelCase ) - 1 ):
UpperCAmelCase , UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCAmelCase = labels.index_select(0 , __UpperCamelCase ) - l_idx
UpperCAmelCase = head_logprob.index_select(0 , __UpperCamelCase )
UpperCAmelCase = hidden.index_select(0 , __UpperCamelCase )
else:
UpperCAmelCase = hidden
if i == 0:
if labels is not None:
UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase = self._compute_logit(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
UpperCAmelCase = nn.functional.log_softmax(__UpperCamelCase , dim=1 )
UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , __UpperCamelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
    def a__( self : Optional[int] , hidden : int )-> Optional[int]:
        """simple docstring"""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
            return nn.functional.log_softmax(logit , dim=-1 )
        else:
            # construct weights and biases
            weights , biases = [], []
            for i in range(len(self.cutoffs ) ):
                if self.div_val == 1:
                    l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight] , dim=0 )
                    bias_i = torch.cat([bias_i, self.cluster_bias] , dim=0 )
                weights.append(weight_i )
                biases.append(bias_i )
            head_weight , head_bias , head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden , head_weight , head_bias , head_proj )
            out = hidden.new_empty((head_logit.size(0 ), self.n_token) )
            head_logprob = nn.functional.log_softmax(head_logit , dim=1 )
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values ) - 1 ):
                start_idx , stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i , bias_i , proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden , weight_i , bias_i , proj_i )
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i , dim=1 )
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
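# The two methods above implement a projected adaptive log-softmax: frequent
# tokens live in a cheap head cluster, rare tokens in smaller tail clusters
# selected by `cutoffs`. A minimal sketch of the same idea using PyTorch's
# built-in module, not this class; the dimensions and cutoffs below are made up.
import torch
from torch import nn

adaptive = nn.AdaptiveLogSoftmaxWithLoss(in_features=64 , n_classes=1_000 , cutoffs=[100, 500] )
hidden = torch.randn(8 , 64 )
labels = torch.randint(0 , 1_000 , (8,) )
result = adaptive(hidden , labels )  # result.loss is the mean NLL over the batch
log_probs = adaptive.log_prob(hidden )  # (8, 1000) full log-distribution, like the method above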
| 714
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover( graph : dict ):
    '''simple docstring'''
    queue = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
graph : dict = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance( A : Vector , B : Vector ):
    '''simple docstring'''
    return np.sqrt(np.sum((np.asarray(A ) - np.asarray(B )) ** 2 ) )
def euclidean_distance_no_np( A : Vector , B : Vector ):
    '''simple docstring'''
    return sum((va - vb) ** 2 for va, vb in zip(A , B ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark( ):
'''simple docstring'''
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) )
benchmark()
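# Cross-check of the two implementations above on a small example: both should
# agree, and the distance between [1, 2, 3] and [4, 5, 6] is sqrt(27).
assert np.isclose(euclidean_distance([1, 2, 3] , [4, 5, 6] ) , euclidean_distance_no_np([1, 2, 3] , [4, 5, 6] ) )
assert np.isclose(euclidean_distance([1, 2, 3] , [4, 5, 6] ) , np.sqrt(27 ) )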
| 715
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase : Optional[int] = """examples/"""
_lowercase : str = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase : List[Any] = """README.md"""
def lowerCamelCase__ ( A : int , A : str , A : Optional[Any] ):
'''simple docstring'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , A )
UpperCAmelCase = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(A , A ) , A , pattern='''examples''' )
def lowerCamelCase__ ( A : str , A : Dict=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A , A , A )
if not patch:
update_version_in_examples(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(A ).groups()[0]
return packaging.version.parse(A )
def lowerCamelCase__ ( A : Tuple=False ):
'''simple docstring'''
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(A ) == 0:
UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(A , patch=A )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = get_version()
UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(A ) == 0:
UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(A )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
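# A minimal sketch of how a REPLACE_PATTERNS entry rewrites a version string;
# the file content below is made up, and the substitution mirrors what the
# update helper above performs on each tracked file:
# re_pattern, replace = REPLACE_PATTERNS["init"]
# code = '__version__ = "4.30.0.dev0"'
# assert re_pattern.sub(replace.replace("VERSION", "4.30.0"), code) == '__version__ = "4.30.0"\n'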
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
graph : dict = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__( self : Tuple , graph : Dict , source_vertex : Optional[Any] )-> Tuple:
        """simple docstring"""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex
    def breath_first_search( self : Optional[Any] )-> Optional[int]:
        """simple docstring"""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self : Any , target_vertex : List[Any] )-> Union[str, Any]:
        """simple docstring"""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F"""->{target_vertex}"""
if __name__ == "__main__":
_lowercase : Tuple = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 716
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
    def _get_tensors( self : List[Any] , length : Optional[Any] )-> Optional[int]:
        """simple docstring"""
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def a__( self : Dict )-> Optional[int]:
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def a__( self : Union[str, Any] )-> Union[str, Any]:
        """simple docstring"""
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def a__( self : Optional[Any] )-> List[Any]:
        """simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def a__( self : Tuple )-> Optional[Any]:
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def a__( self : int )-> Any:
        """simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
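# Minimal usage sketch for the criteria exercised above: `generate` accepts a
# StoppingCriteriaList directly (the model choice below is just an example).
# from transformers import AutoModelForCausalLM, AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tokenizer("Hello", return_tensors="pt")
# criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])
# outputs = model.generate(**inputs, stopping_criteria=criteria)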
| 50
| 0
|
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError( Exception ):
    pass
def gen( shards : List[str] ):
    '''simple docstring'''
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main( ):
    '''simple docstring'''
    rank = int(os.environ['''RANK'''] )
    world_size = int(os.environ['''WORLD_SIZE'''] )
    parser = ArgumentParser()
    parser.add_argument('''--streaming''' , type=bool )
    parser.add_argument('''--local_rank''' , type=int )
    parser.add_argument('''--num_workers''' , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
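# Single-process sketch of the invariant the script above checks: splitting a
# Dataset by node rank partitions the rows across ranks without overlap.
# from datasets import Dataset
# from datasets.distributed import split_dataset_by_node
# ds = Dataset.from_dict({"i": list(range(10))})
# shards = [split_dataset_by_node(ds, rank=r, world_size=3) for r in range(3)]
# assert sorted(i for s in shards for i in s["i"]) == list(range(10))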
| 717
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=DummyObject ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
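# What the dummy above buys you: without the `note_seq` backend installed,
# touching any note_seq-backed class raises an informative ImportError rather
# than an opaque failure deep inside the library. Hypothetical sketch:
# try:
#     MidiProcessor()  # stand-in name for a note_seq-backed diffusers class
# except ImportError as err:
#     assert "note_seq" in str(err)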
| 50
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv2'] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
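# Effect of the _LazyModule indirection above: heavy submodules are imported on
# first attribute access only, keeping the package import cheap. A generic,
# standalone illustration of the same idea via module __getattr__ (PEP 562),
# not transformers' actual machinery; `json` stands in for a heavy submodule.
import importlib
import types

def make_lazy(name: str) -> types.ModuleType:
    mod = types.ModuleType(name )
    def __getattr__(attr ):
        real = importlib.import_module('''json''' )  # deferred until first access
        return getattr(real , attr )
    mod.__getattr__ = __getattr__
    return mod

lazy = make_lazy('''demo''' )
assert lazy.dumps({"a": 1} ) == '{"a": 1}'  # the real import happens on this access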
| 718
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : List[Any] , big_bird_config_file : int , pytorch_dump_path : List[str] , is_trivia_qa : Optional[int] ):
    '''simple docstring'''
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_lowercase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 50
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
class UpperCamelCase__( PretrainedConfig ):
__magic_name__ : int = '''encoder-decoder'''
__magic_name__ : Dict = True
    def __init__( self : List[Any] , **kwargs : Dict )-> Optional[Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def a__( cls : Any , encoder_config : List[Any] , decoder_config : Optional[Any] , **kwargs : Tuple )-> Tuple:
        """simple docstring"""
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def a__( self : Optional[Any] )-> Dict:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''encoder'''] = self.encoder.to_dict()
        output['''decoder'''] = self.decoder.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
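# Minimal sketch of building this config from two sub-configs via the
# classmethod above (upstream name: from_encoder_decoder_configs; BERT defaults
# chosen arbitrarily, no weights involved):
# from transformers import BertConfig, EncoderDecoderConfig
# config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
# assert config.decoder.is_decoder and config.decoder.add_cross_attention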
| 719
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase__( unittest.TestCase ):
    def a__( self : Optional[int] )-> str:
        """simple docstring"""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def a__( self : Union[str, Any] )-> str:
        """simple docstring"""
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
    def a__( self : Any )-> Optional[int]:
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def a__( self : int )-> Dict:
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
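# Where the constraint above is consumed: `generate` runs constrained beam
# search when given a list of constraints (sketch only; model/inputs not shown).
# constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
# outputs = model.generate(**inputs, constraints=[constraint], num_beams=4)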
| 50
| 0
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCamelCase__( tf.keras.optimizers.schedules.LearningRateSchedule ):
    def __init__( self : Any , initial_learning_rate : float , decay_schedule_fn : Callable , warmup_steps : int , power : float = 1.0 , name : str = None , )-> int:
        """simple docstring"""
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name
    def __call__( self : Optional[Any] , step : List[str] )-> str:
        """simple docstring"""
        with tf.name_scope(self.name or '''WarmUp''' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step , tf.float32 )
            warmup_steps_float = tf.cast(self.warmup_steps , tf.float32 )
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done , self.power )
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=name , )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def lowerCamelCase__ ( init_lr : float , num_train_steps : int , num_warmup_steps : int , min_lr_ratio : float = 0.0 , adam_beta1 : float = 0.9 , adam_beta2 : float = 0.999 , adam_epsilon : float = 1E-8 , adam_clipnorm : Optional[float] = None , adam_global_clipnorm : Optional[float] = None , weight_decay_rate : float = 0.0 , power : float = 1.0 , include_in_weight_decay : Optional[List[str]] = None , ):
    '''simple docstring'''
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Optional[int] , lowerCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , lowerCAmelCase : float = 0.9 , lowerCAmelCase : float = 0.999 , lowerCAmelCase : float = 1E-7 , lowerCAmelCase : bool = False , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "AdamWeightDecay" , **lowerCAmelCase : Union[str, Any] , )-> Union[str, Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = weight_decay_rate
UpperCAmelCase = include_in_weight_decay
UpperCAmelCase = exclude_from_weight_decay
@classmethod
def a__( cls : Any , lowerCAmelCase : str )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = {'''WarmUp''': WarmUp}
return super(lowerCAmelCase , cls ).from_config(lowerCAmelCase , custom_objects=lowerCAmelCase )
def a__( self : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] )-> Tuple:
"""simple docstring"""
super(lowerCAmelCase , self )._prepare_local(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def a__( self : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Dict )-> Any:
"""simple docstring"""
UpperCAmelCase = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def a__( self : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict=None , **lowerCAmelCase : str )-> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = list(zip(*lowerCAmelCase ) )
return super(lowerCAmelCase , self ).apply_gradients(zip(lowerCAmelCase , lowerCAmelCase ) , name=lowerCAmelCase , **lowerCAmelCase )
def a__( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] )-> int:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
UpperCAmelCase = apply_state or {}
UpperCAmelCase = apply_state.get((var_device, var_dtype) )
if coefficients is None:
UpperCAmelCase = self._fallback_apply_state(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def a__( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : str=None )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase )
UpperCAmelCase = self._decay_weights_op(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(lowerCAmelCase , self )._resource_apply_dense(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any=None )-> str:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_lr(var.device , var.dtype.base_dtype , lowerCAmelCase )
UpperCAmelCase = self._decay_weights_op(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
with tf.control_dependencies([decay] ):
return super(lowerCAmelCase , self )._resource_apply_sparse(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[Any] )-> str:
"""simple docstring"""
UpperCAmelCase = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def a__( self : Dict , lowerCAmelCase : Union[str, Any] )-> Tuple:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCAmelCase , lowerCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCAmelCase , lowerCAmelCase ) is not None:
return False
return True
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Any )-> int:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = None
@property
def a__( self : List[Any] )-> Optional[int]:
"""simple docstring"""
if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self : Tuple , gradients : List[str] )-> Dict:
        """simple docstring"""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(gradients )}""" )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
def a__( self : Dict )-> str:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
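# Usage sketch for the factory above (upstream name: create_optimizer): one
# call builds the warmup + polynomial-decay schedule and a weight-decayed Adam.
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5E-5, num_train_steps=1_000, num_warmup_steps=100, weight_decay_rate=0.01)
# model.compile(optimizer=optimizer)  # any tf.keras model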
| 720
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory( args : List[Any] ):
    '''simple docstring'''
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand( BaseTransformersCLICommand ):
    @staticmethod
    def a__( parser : ArgumentParser )-> Optional[Any]:
        """simple docstring"""
        download_parser = parser.add_parser('''download''' )
        download_parser.add_argument(
            '''--cache-dir''' , type=str , default=None , help='''Path to location to store the models''' )
        download_parser.add_argument(
            '''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
        download_parser.add_argument(
            '''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
        download_parser.add_argument('''model''' , type=str , help='''Name of the model to download''' )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self : Dict , model : str , cache : str , force : bool , trust_remote_code : bool )-> Any:
        """simple docstring"""
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase__:
    def __init__( self : Tuple , initial_capacity : int = 6 )-> None:
        """simple docstring"""
        self.front = None
        self.rear = None
        self.create_linked_list(initial_capacity )
    def create_linked_list( self : Optional[Any] , initial_capacity : int )-> None:
        """simple docstring"""
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1 , initial_capacity ):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty( self : Optional[int] )-> bool:
        """simple docstring"""
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )
    def first( self : Optional[Any] )-> Any | None:
        """simple docstring"""
        self.check_can_perform_operation()
        return self.front.data if self.front else None
    def enqueue( self : Optional[Any] , data : Any )-> None:
        """simple docstring"""
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue( self : Dict )-> Any:
        """simple docstring"""
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation( self : str )-> None:
        """simple docstring"""
        if self.is_empty():
            raise Exception('''Empty Queue''' )
    def check_is_full( self : List[Any] )-> None:
        """simple docstring"""
        if self.rear and self.rear.next == self.front:
            raise Exception('''Full Queue''' )
class Node:
    def __init__( self : List[Any] )-> None:
        """simple docstring"""
        self.data = None
        self.next = None
        self.prev = None
if __name__ == "__main__":
import doctest
doctest.testmod()
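# Usage sketch for the fixed-capacity queue above (the class keeps its
# obfuscated name in this copy): values come out in insertion order.
q = UpperCamelCase__(2 )
q.enqueue('''a''' )
q.enqueue('''b''' )
assert q.dequeue() == '''a'''
assert q.dequeue() == '''b'''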
| 721
|
'''simple docstring'''
def lowerCamelCase__ ( column_title : str ):
    '''simple docstring'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
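# Worked examples for the Excel-style column decoder above (using the file's
# obfuscated function name): "A" -> 1, "AB" -> 1 * 26 + 2 = 28, "ZY" -> 701.
assert lowerCamelCase__('''A''' ) == 1
assert lowerCamelCase__('''AB''' ) == 28
assert lowerCamelCase__('''ZY''' ) == 701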
| 50
| 0
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df : Dict , partition_order : Any ):
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(1_00 ).repartition(1 )
UpperCAmelCase = Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(10 ).repartition(2 )
UpperCAmelCase = [1, 0]
UpperCAmelCase = _generate_iterable_examples(A , A ) # Reverse the partitions.
UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCAmelCase , UpperCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(10 ).repartition(1 )
UpperCAmelCase = SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
UpperCAmelCase = lambda A : x.reverse()
UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
UpperCAmelCase = SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
UpperCAmelCase , UpperCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
UpperCAmelCase = SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
UpperCAmelCase , UpperCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCAmelCase = SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
UpperCAmelCase , UpperCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
UpperCAmelCase = spark.range(1_00 ).repartition(1 )
UpperCAmelCase = Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 700
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]="relu" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ : Optional[int] = False
__magic_name__ : List[str] = False
__magic_name__ : Dict = False
def a__( self : Union[str, Any] )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
pass
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Dict )-> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 50
| 0
|
'''simple docstring'''
def least_divisible_repunit( divisor : int ):
    '''simple docstring'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution( limit : int = 1_00_00_00 ):
    '''simple docstring'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
| 701
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = """▁"""
_lowercase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowercase : Any = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowercase : int = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
_lowercase : int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
__magic_name__ : List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
| 50
| 0
|
def lowerCamelCase__ ( A : int = 10_00 ):
'''simple docstring'''
return sum(e for e in range(3 , A ) if e % 3 == 0 or e % 5 == 0 )
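# For example, with n=10 the result is 23 (3 + 5 + 6 + 9).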
if __name__ == "__main__":
print(F"""{solution() = }""")
| 702
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowercase : Union[str, Any] = """3"""
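# NOTE: presumably a remnant of os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" from
# the upstream diagnostic script, which silences TensorFlow C++ logging.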
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 50
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : int = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[Any] = "codegen"
__magic_name__ : Any = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , lowerCAmelCase : Optional[int]=50400 , lowerCAmelCase : Tuple=2048 , lowerCAmelCase : List[Any]=2048 , lowerCAmelCase : Optional[int]=4096 , lowerCAmelCase : str=28 , lowerCAmelCase : Tuple=16 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Dict=None , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=1E-5 , lowerCAmelCase : Optional[int]=0.02 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Union[str, Any]=50256 , lowerCAmelCase : str=50256 , lowerCAmelCase : List[Any]=False , **lowerCAmelCase : Union[str, Any] , )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = n_ctx
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = n_inner
UpperCAmelCase = rotary_dim
UpperCAmelCase = activation_function
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = use_cache
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , **lowerCAmelCase )
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Union[str, Any] , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : str = "default" , lowerCAmelCase : List[PatchingSpec] = None , lowerCAmelCase : bool = False , )-> Optional[Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase , task=lowerCAmelCase , patching_specs=lowerCAmelCase , use_past=lowerCAmelCase )
if not getattr(self._config , '''pad_token_id''' , lowerCAmelCase ):
# TODO: how to do that better?
UpperCAmelCase = 0
@property
def a__( self : Any )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
UpperCAmelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='''inputs''' )
UpperCAmelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
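# Axis 0 is the dynamic batch axis; with use_past, the attention mask's
# axis 1 spans past_sequence + sequence.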
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return self._config.n_layer
@property
def a__( self : int )-> int:
"""simple docstring"""
return self._config.n_head
def a__( self : Dict , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , )-> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase = super(lowerCAmelCase , self ).generate_dummy_inputs(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
# We need to order the inputs the way they appear in forward()
UpperCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCAmelCase , UpperCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase = seqlen + 2
UpperCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
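# Per-layer past_key_values shape: (batch, num_heads, past_seq_len, head_dim).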
UpperCAmelCase = [
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers )
]
UpperCAmelCase = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
return ordered_inputs
@property
def a__( self : Dict )-> int:
"""simple docstring"""
return 13
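# Likely the upstream `default_onnx_opset` property: the export targets ONNX opset 13.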
| 703
|
'''simple docstring'''
import functools
def lowerCamelCase__ ( A : list[int] , A : list[int] ):
'''simple docstring'''
if not isinstance(A , A ) or not all(isinstance(A , A ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(A ) != 3 or not all(isinstance(A , A ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(A ) == 0:
return 0
if min(A ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(A ) >= 3_66:
raise ValueError('''All days elements should be less than 366''' )
UpperCAmelCase = set(A )
@functools.cache
def dynamic_programming(A : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
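# Worked example (classic minimum-cost-for-tickets instance):
# days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15] -> 11
# (a 1-day pass on day 1, a 7-day pass for days 4-8, a 1-day pass on day 20).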
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
| 0
|
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Dict = (EulerDiscreteScheduler,)
__magic_name__ : str = 10
def a__( self : List[str] , **lowerCAmelCase : Any )-> Any:
"""simple docstring"""
UpperCAmelCase = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**lowerCAmelCase )
return config
def a__( self : Optional[int] )-> Any:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase )
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCAmelCase )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def a__( self : Dict )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(lowerCAmelCase )
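# Standard denoising loop: scale the sample for the current sigma, predict
# the noise residual with the model, then take one scheduler step.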
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase = sample.to(lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
UpperCAmelCase = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(lowerCAmelCase )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase , use_karras_sigmas=lowerCAmelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
UpperCAmelCase = sample.to(lowerCAmelCase )
for t in scheduler.timesteps:
UpperCAmelCase = scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , generator=lowerCAmelCase )
UpperCAmelCase = output.prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1E-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Any = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
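# _LazyModule defers the imports listed above until the corresponding
# attribute is first accessed, keeping package import time low.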
| 50
| 0
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_lowercase : Dict = logging.get_logger(__name__)
enable_full_determinism()
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Union[str, Any] = UNetaDModel
__magic_name__ : Union[str, Any] = "sample"
@property
def a__( self : Dict )-> Any:
"""simple docstring"""
UpperCAmelCase = 4
UpperCAmelCase = 3
UpperCAmelCase = (32, 32)
UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
UpperCAmelCase = torch.tensor([10] ).to(lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
@property
def a__( self : int )-> str:
"""simple docstring"""
return (3, 32, 32)
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
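# A deliberately tiny UNet (two blocks, 32x32 samples) so the tests stay fast.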
UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Any = UNetaDModel
__magic_name__ : str = "sample"
@property
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = 4
UpperCAmelCase = 4
UpperCAmelCase = (32, 32)
UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
UpperCAmelCase = torch.tensor([10] ).to(lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
return (4, 32, 32)
@property
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
return (4, 32, 32)
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(lowerCAmelCase )
UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=lowerCAmelCase )
model.to(lowerCAmelCase )
UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def a__( self : Tuple )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=lowerCAmelCase )
model_accelerate.to(lowerCAmelCase )
model_accelerate.eval()
UpperCAmelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase = noise.to(lowerCAmelCase )
UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(lowerCAmelCase )
UpperCAmelCase = model_accelerate(lowerCAmelCase , lowerCAmelCase )['''sample''']
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=lowerCAmelCase , low_cpu_mem_usage=lowerCAmelCase )
model_normal_load.to(lowerCAmelCase )
model_normal_load.eval()
UpperCAmelCase = model_normal_load(lowerCAmelCase , lowerCAmelCase )['''sample''']
assert torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-3 )
def a__( self : int )-> Any:
"""simple docstring"""
UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(lowerCAmelCase )
UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCAmelCase = noise.to(lowerCAmelCase )
UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(lowerCAmelCase )
with torch.no_grad():
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase ).sample
UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCAmelCase = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-3 ) )
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : int = UNetaDModel
__magic_name__ : List[Any] = "sample"
@property
def a__( self : Any , lowerCAmelCase : Union[str, Any]=(32, 32) )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = 4
UpperCAmelCase = 3
UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def a__( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
return (3, 32, 32)
@property
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
return (3, 32, 32)
def a__( self : Dict )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
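# Fourier time embeddings plus Skip*/AttnSkip* blocks mirror the NCSN++
# score-model architecture; the sqrt(2) mid-block scale matches that design.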
UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def a__( self : Any )-> str:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(lowerCAmelCase )
UpperCAmelCase = self.dummy_input
UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(lowerCAmelCase )
UpperCAmelCase = noise
UpperCAmelCase = model(**lowerCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(lowerCAmelCase )
UpperCAmelCase = 4
UpperCAmelCase = 3
UpperCAmelCase = (256, 256)
UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
UpperCAmelCase = torch.tensor(batch_size * [1E-4] ).to(lowerCAmelCase )
with torch.no_grad():
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase ).sample
UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) )
def a__( self : List[str] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(lowerCAmelCase )
UpperCAmelCase = 4
UpperCAmelCase = 3
UpperCAmelCase = (32, 32)
UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(lowerCAmelCase )
UpperCAmelCase = torch.tensor(batch_size * [1E-4] ).to(lowerCAmelCase )
with torch.no_grad():
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase ).sample
UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCAmelCase = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1E-2 ) )
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
pass
| 705
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple=1 ):
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowerCamelCase__ ( A : int , A : Optional[Any]=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item.replace('''in_layers.0''' , '''norm1''' )
UpperCAmelCase = new_item.replace('''in_layers.2''' , '''conv1''' )
UpperCAmelCase = new_item.replace('''out_layers.0''' , '''norm2''' )
UpperCAmelCase = new_item.replace('''out_layers.3''' , '''conv2''' )
UpperCAmelCase = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
UpperCAmelCase = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
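# e.g. maps "...in_layers.0.weight"-style keys to their "...norm1.weight"
# counterparts in the diffusers naming scheme.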
def lowerCamelCase__ ( A : Any , A : int=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item
UpperCAmelCase = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
UpperCAmelCase = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
UpperCAmelCase = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
UpperCAmelCase = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Tuple , A : Union[str, Any] , A : int , A : Dict=None , A : Optional[int]=None , A : Optional[Any]=None ):
'''simple docstring'''
assert isinstance(A , A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase = old_checkpoint[path]
UpperCAmelCase = old_tensor.shape[0] // 3
UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase = old_tensor.shape[0] // config['''num_head_channels'''] // 3
UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase = query.reshape(A )
UpperCAmelCase = key.reshape(A )
UpperCAmelCase = value.reshape(A )
for path in paths:
UpperCAmelCase = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
UpperCAmelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
UpperCAmelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCAmelCase = old_checkpoint[path['''old''']]
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = checkpoint['''time_embed.0.weight''']
UpperCAmelCase = checkpoint['''time_embed.0.bias''']
UpperCAmelCase = checkpoint['''time_embed.2.weight''']
UpperCAmelCase = checkpoint['''time_embed.2.bias''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase = checkpoint['''out.0.weight''']
UpperCAmelCase = checkpoint['''out.0.bias''']
UpperCAmelCase = checkpoint['''out.2.weight''']
UpperCAmelCase = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
for i in range(1 , A ):
UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1)
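# Each resolution level holds num_res_blocks resnets plus one downsampler,
# hence the (num_res_blocks + 1) stride when recovering block and layer ids.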
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path, resnet_op] , config=A )
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=A , config=A , )
UpperCAmelCase = middle_blocks[0]
UpperCAmelCase = middle_blocks[1]
UpperCAmelCase = middle_blocks[2]
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
A , A , A , attention_paths_to_split=A , config=A )
for i in range(A ):
UpperCAmelCase = i // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = i % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [shave_segments(A , 2 ) for name in output_blocks[i]]
UpperCAmelCase = {}
for layer in output_block_layers:
UpperCAmelCase , UpperCAmelCase = layer.split('''.''' )[0], shave_segments(A , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A )
else:
UpperCAmelCase = [layer_name]
if len(A ) > 1:
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(A , A , A , additional_replacements=[meta_path] , config=A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(A ) == 2:
UpperCAmelCase = []
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=A , )
else:
UpperCAmelCase = renew_resnet_paths(A , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase = '''.'''.join(['''output_blocks''', str(A ), path['''old''']] )
UpperCAmelCase = '''.'''.join(['''up_blocks''', str(A ), '''resnets''', str(A ), path['''new''']] )
UpperCAmelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowercase : Dict = parser.parse_args()
_lowercase : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowercase : List[str] = json.loads(f.read())
_lowercase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowercase : Any = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowercase : Tuple = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 50
| 0
|
'''simple docstring'''
import heapq
def lowerCamelCase__ ( A : dict ):
'''simple docstring'''
UpperCAmelCase = []
# for each node and its adjacency list, add them and the node's rank to the queue
# using the heapq module the queue will be filled like a priority queue
# heapq implements a min-priority queue, so -1 * len(v) is used to get max-degree-first ordering
for key, value in graph.items():
# O(log(n))
heapq.heappush(A , [-1 * len(A ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase = set()
# while the queue isn't empty and there are still edges
# (queue[0][0] is the negated degree of the highest-degree remaining node)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase = heapq.heappop(A )[1][0]
chosen_vertices.add(A )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
UpperCAmelCase = elem[1][1].index(A )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(A )
return chosen_vertices
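# Note: this is a greedy heuristic; it returns an approximate (not necessarily
# minimum) vertex cover.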
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 706
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = "dpr"
def __init__( self : Dict , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Tuple="absolute" , lowerCAmelCase : int = 0 , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = projection_dim
UpperCAmelCase = position_embedding_type
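# Usage sketch (hypothetical value): passing projection_dim=128 makes the DPR
# encoder project its pooled output to 128 dimensions; 0 keeps the hidden size.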
| 50
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = """▁"""
_lowercase = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowercase = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowercase = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
_lowercase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
__magic_name__ : List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
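# Mask layout: 1 marks a special token, 0 a sequence token, i.e.
# [1] * len(prefix) + [0] * len(ids) + [1] * len(suffix).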
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
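# Resulting format follows the mBART-50 convention: [src_lang_code] X [eos]
# (and [src_lang_code] A B [eos] for pairs).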
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
| 707
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : Tuple = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
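# In short: exactly one of the MLM/CLM objectives is active, the
# student/teacher pair is compatible, and the loss weights sum to > 0.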
def lowerCamelCase__ ( A : Any , A : str ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
elif args.student_type == "gpt2":
UpperCAmelCase = False
def lowerCamelCase__ ( A : List[Any] , A : List[str] ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=A , required=A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=A , required=A , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=A , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=A , required=A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=A , type=A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=A , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=A , required=A , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=A , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
        '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is set, MLM is used instead of CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=A , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=A , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=A , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=A , default=40_00 , help='''Checkpoint interval.''' )
UpperCAmelCase = parser.parse_args()
sanity_checks(A )
# ARGS #
init_gpu_params(A )
set_seed(A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(A ) , A , indent=4 )
git_log(args.dump_path )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.student_type]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase = tokenizer.all_special_tokens.index(A )
UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase = special_tok_ids
UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
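        # Turn raw counts into masking scores: counts ** -mlm_smoothing boosts
        # rare tokens (XLM-style smoothing, similar to word2vec subsampling).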
UpperCAmelCase = np.maximum(A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase = 0.0 # do not predict special tokens
UpperCAmelCase = torch.from_numpy(A )
else:
UpperCAmelCase = None
UpperCAmelCase = LmSeqsDataset(params=A , data=A )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=A )
else:
UpperCAmelCase = student_model_class(A )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A , A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A , A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase = Distiller(
params=A , dataset=A , token_probs=A , student=A , teacher=A )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : str = ["image_processor", "tokenizer"]
__magic_name__ : Union[str, Any] = "LayoutLMv3ImageProcessor"
__magic_name__ : List[str] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self : Optional[int] , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[int]=None , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Dict , lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowerCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , lowerCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 0 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[str, TensorType]] = None , **lowerCAmelCase : Optional[int] , )-> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
UpperCAmelCase = self.image_processor(images=lowerCAmelCase , return_tensors=lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
UpperCAmelCase = features['''words''']
UpperCAmelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , stride=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_overflowing_tokens=lowerCAmelCase , return_special_tokens_mask=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , return_length=lowerCAmelCase , verbose=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase , )
# add pixel values
UpperCAmelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
UpperCAmelCase = self.get_overflowing_images(lowerCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
UpperCAmelCase = images
return encoded_inputs
def a__( self : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(lowerCAmelCase )} and {len(lowerCAmelCase )}""" )
return images_with_overflow
def a__( self : List[str] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : List[Any] )-> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : List[Any] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Tuple )-> Dict:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Dict:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def a__( self : int )-> Tuple:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
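# Minimal usage sketch for the processor defined above (hypothetical checkpoint
# name; assumes Pillow is installed and OCR is enabled in the image processor):
#   processor = UpperCamelCase__.from_pretrained('''microsoft/layoutlmv3-base''')
#   encoding = processor(image, return_tensors='''pt''')
#   # -> input_ids, bbox, attention_mask, pixel_values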
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
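    # Expects exactly one of `text` or `visual_prompt`, optionally combined with
    # `images`; each branch below returns the matching token ids / pixel values.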
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
'''simple docstring'''
import socket
def lowerCamelCase__ ( ):
'''simple docstring'''
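    # Minimal TCP client: connect to a server on this host, send a greeting,
    # then stream the server's reply into ``Received_file`` 1024 bytes at a time.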
UpperCAmelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
UpperCAmelCase = socket.gethostname()
UpperCAmelCase = 1_23_12
sock.connect((host, port) )
sock.send(b'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
UpperCAmelCase = sock.recv(10_24 )
if not data:
break
            out_file.write(data )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
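# Lazy import structure: the heavy torch/vision submodules below are only
# materialized on first attribute access, keeping the top-level import cheap.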
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
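# Model tester: builds tiny Swin Transformer V2 configs and random inputs so
# the shared ModelTesterMixin checks below run quickly.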
class UpperCamelCase__:
def __init__( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=13 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : int=2 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Optional[int]=16 , lowerCAmelCase : str=[1, 2, 1] , lowerCAmelCase : int=[2, 2, 4] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : Dict=2.0 , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Tuple=False , lowerCAmelCase : int=True , lowerCAmelCase : str=0.02 , lowerCAmelCase : Union[str, Any]=1E-5 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Union[str, Any]=10 , lowerCAmelCase : Optional[Any]=8 , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = depths
UpperCAmelCase = num_heads
UpperCAmelCase = window_size
UpperCAmelCase = mlp_ratio
UpperCAmelCase = qkv_bias
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = drop_path_rate
UpperCAmelCase = hidden_act
UpperCAmelCase = use_absolute_embeddings
UpperCAmelCase = patch_norm
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = is_training
UpperCAmelCase = scope
UpperCAmelCase = use_labels
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = encoder_stride
def a__( self : str )-> str:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def a__( self : Optional[int] )-> int:
"""simple docstring"""
return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def a__( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = SwinvaModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase )
UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : Any )-> Any:
"""simple docstring"""
UpperCAmelCase = SwinvaForMaskedImageModeling(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase = 1
UpperCAmelCase = SwinvaForMaskedImageModeling(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = SwinvaForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__( self : Any )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : List[str] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__magic_name__ : List[str] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ : Optional[int] = False
__magic_name__ : List[Any] = False
__magic_name__ : List[str] = False
__magic_name__ : List[str] = False
def a__( self : Dict )-> List[str]:
"""simple docstring"""
UpperCAmelCase = SwinvaModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , embed_dim=37 )
def a__( self : Optional[int] )-> Tuple:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def a__( self : Any )-> Any:
"""simple docstring"""
pass
def a__( self : str )-> Dict:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.attentions
UpperCAmelCase = len(self.model_tester.depths )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
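            # windowed self-attention: each attention map covers window_size**2 patches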
UpperCAmelCase = config.window_size**2
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase = len(lowerCAmelCase )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
UpperCAmelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCAmelCase ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def a__( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Any )-> Dict:
"""simple docstring"""
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# Swinv2 has a different seq_length
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
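        # stage-0 sequence length = number of non-overlapping patches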
UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = reshaped_hidden_states[0].shape
UpperCAmelCase = (
reshaped_hidden_states[0].view(lowerCAmelCase , lowerCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : str )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , (padded_height, padded_width) )
def a__( self : Optional[int] )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase )
def a__( self : str )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def a__( self : Any )-> List[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = SwinvaModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : List[Any] )-> str:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def a__( self : List[str] )-> Dict:
"""simple docstring"""
UpperCAmelCase = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
lowerCAmelCase )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        ''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'''
        ''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=A , default='''logs''' , help='''Location where to store experiment tracking logs and relevant project information''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
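# Launch sketch (assumes `accelerate config` has been run; the script name is
# illustrative):
#   accelerate launch this_script.py --with_tracking --project_dir logs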
'''simple docstring'''
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : BigBirdConfig
__magic_name__ : jnp.dtype = jnp.floataa
__magic_name__ : bool = True
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
super().setup()
UpperCAmelCase = nn.Dense(5 , dtype=self.dtype )
def __call__( self : List[Any] , *lowerCAmelCase : str , **lowerCAmelCase : int )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = super().__call__(*lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Dict = FlaxBigBirdForNaturalQuestionsModule
def lowerCamelCase__ ( A : str , A : Any , A : int , A : str , A : List[str] , A : List[Any] ):
'''simple docstring'''
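    # Loss = mean of three cross-entropy terms: answer-span start logits,
    # answer-span end logits, and the pooled answer-category head.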
def cross_entropy(A : Optional[int] , A : Tuple , A : int=None ):
UpperCAmelCase = logits.shape[-1]
UpperCAmelCase = (labels[..., None] == jnp.arange(A )[None]).astype('''f4''' )
UpperCAmelCase = jax.nn.log_softmax(A , axis=-1 )
UpperCAmelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
UpperCAmelCase = reduction(A )
return loss
UpperCAmelCase = partial(A , reduction=jnp.mean )
UpperCAmelCase = cross_entropy(A , A )
UpperCAmelCase = cross_entropy(A , A )
UpperCAmelCase = cross_entropy(A , A )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class UpperCamelCase__:
__magic_name__ : str = "google/bigbird-roberta-base"
__magic_name__ : int = 3000
__magic_name__ : int = 1_0500
__magic_name__ : int = 128
__magic_name__ : int = 3
__magic_name__ : int = 1
__magic_name__ : int = 5
# tx_args
__magic_name__ : float = 3e-5
__magic_name__ : float = 0.0
__magic_name__ : int = 2_0000
__magic_name__ : float = 0.0_0_9_5
__magic_name__ : str = "bigbird-roberta-natural-questions"
__magic_name__ : str = "training-expt"
__magic_name__ : str = "data/nq-training.jsonl"
__magic_name__ : str = "data/nq-validation.jsonl"
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=lowerCAmelCase )
UpperCAmelCase = os.path.join(self.base_dir , self.save_dir )
UpperCAmelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class UpperCamelCase__:
__magic_name__ : int
__magic_name__ : int = 4096 # no dynamic padding on TPUs
def __call__( self : List[str] , lowerCAmelCase : Any )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.collate_fn(lowerCAmelCase )
UpperCAmelCase = jax.tree_util.tree_map(lowerCAmelCase , lowerCAmelCase )
return batch
def a__( self : List[str] , lowerCAmelCase : Any )-> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.fetch_inputs(features['''input_ids'''] )
UpperCAmelCase = {
'''input_ids''': jnp.array(lowerCAmelCase , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(lowerCAmelCase , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def a__( self : Tuple , lowerCAmelCase : list )-> int:
"""simple docstring"""
UpperCAmelCase = [self._fetch_inputs(lowerCAmelCase ) for ids in input_ids]
return zip(*lowerCAmelCase )
def a__( self : Dict , lowerCAmelCase : list )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = [1 for _ in range(len(lowerCAmelCase ) )]
while len(lowerCAmelCase ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def lowerCamelCase__ ( A : str , A : Optional[Any] , A : List[str]=None ):
'''simple docstring'''
if seed is not None:
UpperCAmelCase = dataset.shuffle(seed=A )
for i in range(len(A ) // batch_size ):
UpperCAmelCase = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(A )
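# One pmapped training step: per-device loss + grads, gradients averaged across
# devices with `pmean`, then the optimizer update is applied.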
@partial(jax.pmap , axis_name='''batch''' )
def lowerCamelCase__ ( A : List[str] , A : Optional[int] , **A : int ):
'''simple docstring'''
def loss_fn(A : Dict ):
UpperCAmelCase = model_inputs.pop('''start_labels''' )
UpperCAmelCase = model_inputs.pop('''end_labels''' )
UpperCAmelCase = model_inputs.pop('''pooled_labels''' )
UpperCAmelCase = state.apply_fn(**A , params=A , dropout_rng=A , train=A )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = outputs
return state.loss_fn(
A , A , A , A , A , A , )
UpperCAmelCase , UpperCAmelCase = jax.random.split(A )
UpperCAmelCase = jax.value_and_grad(A )
UpperCAmelCase , UpperCAmelCase = grad_fn(state.params )
UpperCAmelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
UpperCAmelCase = jax.lax.pmean(A , '''batch''' )
UpperCAmelCase = state.apply_gradients(grads=A )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='''batch''' )
def lowerCamelCase__ ( A : List[str] , **A : List[str] ):
'''simple docstring'''
UpperCAmelCase = model_inputs.pop('''start_labels''' )
UpperCAmelCase = model_inputs.pop('''end_labels''' )
UpperCAmelCase = model_inputs.pop('''pooled_labels''' )
UpperCAmelCase = state.apply_fn(**A , params=state.params , train=A )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = outputs
UpperCAmelCase = state.loss_fn(A , A , A , A , A , A )
UpperCAmelCase = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' )
return metrics
class UpperCamelCase__( train_state.TrainState ):
__magic_name__ : Callable = struct.field(pytree_node=lowerCAmelCase )
@dataclass
class UpperCamelCase__:
__magic_name__ : Args
__magic_name__ : Callable
__magic_name__ : Callable
__magic_name__ : Callable
__magic_name__ : Callable
__magic_name__ : wandb
__magic_name__ : Callable = None
def a__( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any]=None )-> Tuple:
"""simple docstring"""
UpperCAmelCase = model.params
UpperCAmelCase = TrainState.create(
apply_fn=model.__call__ , params=lowerCAmelCase , tx=lowerCAmelCase , loss_fn=lowerCAmelCase , )
if ckpt_dir is not None:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = restore_checkpoint(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = {
'''lr''': args.lr,
'''init_lr''': args.init_lr,
'''warmup_steps''': args.warmup_steps,
'''num_train_steps''': num_train_steps,
'''weight_decay''': args.weight_decay,
}
UpperCAmelCase , UpperCAmelCase = build_tx(**lowerCAmelCase )
UpperCAmelCase = train_state.TrainState(
step=lowerCAmelCase , apply_fn=model.__call__ , params=lowerCAmelCase , tx=lowerCAmelCase , opt_state=lowerCAmelCase , )
UpperCAmelCase = args
UpperCAmelCase = data_collator
UpperCAmelCase = lr
UpperCAmelCase = params
UpperCAmelCase = jax_utils.replicate(lowerCAmelCase )
return state
def a__( self : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.args
UpperCAmelCase = len(lowerCAmelCase ) // args.batch_size
UpperCAmelCase = jax.random.PRNGKey(0 )
UpperCAmelCase = jax.random.split(lowerCAmelCase , jax.device_count() )
for epoch in range(args.max_epochs ):
UpperCAmelCase = jnp.array(0 , dtype=jnp.floataa )
UpperCAmelCase = get_batched_dataset(lowerCAmelCase , args.batch_size , seed=lowerCAmelCase )
UpperCAmelCase = 0
for batch in tqdm(lowerCAmelCase , total=lowerCAmelCase , desc=F"""Running EPOCH-{epoch}""" ):
UpperCAmelCase = self.data_collator(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self.train_step_fn(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
if i % args.logging_steps == 0:
UpperCAmelCase = jax_utils.unreplicate(state.step )
UpperCAmelCase = running_loss.item() / i
UpperCAmelCase = self.scheduler_fn(state_step - 1 )
UpperCAmelCase = self.evaluate(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = {
'''step''': state_step.item(),
'''eval_loss''': eval_loss.item(),
'''tr_loss''': tr_loss,
'''lr''': lr.item(),
}
tqdm.write(str(lowerCAmelCase ) )
self.logger.log(lowerCAmelCase , commit=lowerCAmelCase )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"""-e{epoch}-s{i}""" , state=lowerCAmelCase )
def a__( self : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple )-> Any:
"""simple docstring"""
UpperCAmelCase = get_batched_dataset(lowerCAmelCase , self.args.batch_size )
UpperCAmelCase = len(lowerCAmelCase ) // self.args.batch_size
UpperCAmelCase = jnp.array(0 , dtype=jnp.floataa )
UpperCAmelCase = 0
for batch in tqdm(lowerCAmelCase , total=lowerCAmelCase , desc='''Evaluating ... ''' ):
UpperCAmelCase = self.data_collator(lowerCAmelCase )
UpperCAmelCase = self.val_step_fn(lowerCAmelCase , **lowerCAmelCase )
running_loss += jax_utils.unreplicate(metrics['''loss'''] )
i += 1
return running_loss / i
def a__( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : str )-> Tuple:
"""simple docstring"""
UpperCAmelCase = jax_utils.unreplicate(lowerCAmelCase )
print(F"""SAVING CHECKPOINT IN {save_dir}""" , end=''' ... ''' )
self.model_save_fn(lowerCAmelCase , params=state.params )
with open(os.path.join(lowerCAmelCase , '''opt_state.msgpack''' ) , '''wb''' ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(lowerCAmelCase , '''args.joblib''' ) )
joblib.dump(self.data_collator , os.path.join(lowerCAmelCase , '''data_collator.joblib''' ) )
with open(os.path.join(lowerCAmelCase , '''training_state.json''' ) , '''w''' ) as f:
json.dump({'''step''': state.step.item()} , lowerCAmelCase )
print('''DONE''' )
def lowerCamelCase__ ( A : Tuple , A : Dict ):
'''simple docstring'''
print(f"""RESTORING CHECKPOINT FROM {save_dir}""" , end=''' ... ''' )
with open(os.path.join(A , '''flax_model.msgpack''' ) , '''rb''' ) as f:
UpperCAmelCase = from_bytes(state.params , f.read() )
with open(os.path.join(A , '''opt_state.msgpack''' ) , '''rb''' ) as f:
UpperCAmelCase = from_bytes(state.opt_state , f.read() )
UpperCAmelCase = joblib.load(os.path.join(A , '''args.joblib''' ) )
UpperCAmelCase = joblib.load(os.path.join(A , '''data_collator.joblib''' ) )
with open(os.path.join(A , '''training_state.json''' ) , '''r''' ) as f:
UpperCAmelCase = json.load(A )
UpperCAmelCase = training_state['''step''']
print('''DONE''' )
return params, opt_state, step, args, data_collator
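# Linear warmup from init_value up to the peak learning rate, then linear decay
# towards ~1e-7 for the remaining training steps.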
def lowerCamelCase__ ( A : Any , A : Any , A : Union[str, Any] , A : Tuple ):
'''simple docstring'''
UpperCAmelCase = num_train_steps - warmup_steps
UpperCAmelCase = optax.linear_schedule(init_value=A , end_value=A , transition_steps=A )
UpperCAmelCase = optax.linear_schedule(init_value=A , end_value=1E-7 , transition_steps=A )
UpperCAmelCase = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def lowerCamelCase__ ( A : List[Any] , A : Optional[Any] , A : str , A : Optional[Any] , A : Any ):
'''simple docstring'''
def weight_decay_mask(A : List[str] ):
UpperCAmelCase = traverse_util.flatten_dict(A )
UpperCAmelCase = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()}
return traverse_util.unflatten_dict(A )
UpperCAmelCase = scheduler_fn(A , A , A , A )
UpperCAmelCase = optax.adamw(learning_rate=A , weight_decay=A , mask=A )
return tx, lr
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class UpperCamelCase__:
__magic_name__ : CommonSchedulerState
# setable values
__magic_name__ : jnp.ndarray
__magic_name__ : jnp.ndarray
__magic_name__ : Optional[int] = None
@classmethod
def a__( cls : str , lowerCAmelCase : CommonSchedulerState , lowerCAmelCase : jnp.ndarray , lowerCAmelCase : jnp.ndarray )-> List[str]:
"""simple docstring"""
return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase )
@dataclass
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : DDPMSchedulerState
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : List[str] = [e.name for e in FlaxKarrasDiffusionSchedulers]
__magic_name__ : jnp.dtype
@property
def a__( self : str )-> str:
"""simple docstring"""
return True
@register_to_config
def __init__( self : List[Any] , lowerCAmelCase : int = 1000 , lowerCAmelCase : float = 0.0001 , lowerCAmelCase : float = 0.02 , lowerCAmelCase : str = "linear" , lowerCAmelCase : Optional[jnp.ndarray] = None , lowerCAmelCase : str = "fixed_small" , lowerCAmelCase : bool = True , lowerCAmelCase : str = "epsilon" , lowerCAmelCase : jnp.dtype = jnp.floataa , )-> Tuple:
"""simple docstring"""
UpperCAmelCase = dtype
def a__( self : Union[str, Any] , lowerCAmelCase : Optional[CommonSchedulerState] = None )-> DDPMSchedulerState:
"""simple docstring"""
if common is None:
UpperCAmelCase = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
UpperCAmelCase = jnp.array(1.0 , dtype=self.dtype )
UpperCAmelCase = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , )
def a__( self : Union[str, Any] , lowerCAmelCase : DDPMSchedulerState , lowerCAmelCase : jnp.ndarray , lowerCAmelCase : Optional[int] = None )-> jnp.ndarray:
"""simple docstring"""
return sample
def a__( self : str , lowerCAmelCase : DDPMSchedulerState , lowerCAmelCase : int , lowerCAmelCase : Tuple = () )-> DDPMSchedulerState:
"""simple docstring"""
UpperCAmelCase = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
UpperCAmelCase = (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , )
def a__( self : Any , lowerCAmelCase : DDPMSchedulerState , lowerCAmelCase : str , lowerCAmelCase : Any=None , lowerCAmelCase : Any=None )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = state.common.alphas_cumprod[t]
UpperCAmelCase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCAmelCase = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
UpperCAmelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
UpperCAmelCase = jnp.clip(lowerCAmelCase , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
UpperCAmelCase = jnp.log(jnp.clip(lowerCAmelCase , a_min=1E-20 ) )
elif variance_type == "fixed_large":
UpperCAmelCase = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
UpperCAmelCase = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
UpperCAmelCase = variance
UpperCAmelCase = state.common.betas[t]
UpperCAmelCase = (predicted_variance + 1) / 2
UpperCAmelCase = frac * max_log + (1 - frac) * min_log
return variance
def a__( self : List[str] , lowerCAmelCase : DDPMSchedulerState , lowerCAmelCase : jnp.ndarray , lowerCAmelCase : int , lowerCAmelCase : jnp.ndarray , lowerCAmelCase : Optional[jax.random.KeyArray] = None , lowerCAmelCase : bool = True , )-> Union[FlaxDDPMSchedulerOutput, Tuple]:
"""simple docstring"""
UpperCAmelCase = timestep
if key is None:
UpperCAmelCase = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
UpperCAmelCase , UpperCAmelCase = jnp.split(lowerCAmelCase , sample.shape[1] , axis=1 )
else:
UpperCAmelCase = None
# 1. compute alphas, betas
UpperCAmelCase = state.common.alphas_cumprod[t]
UpperCAmelCase = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
UpperCAmelCase = 1 - alpha_prod_t
UpperCAmelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCAmelCase = model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCAmelCase = jnp.clip(lowerCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
UpperCAmelCase = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
UpperCAmelCase = jax.random.split(lowerCAmelCase , num=1 )
UpperCAmelCase = jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise
UpperCAmelCase = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
UpperCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase )
def a__( self : List[Any] , lowerCAmelCase : DDPMSchedulerState , lowerCAmelCase : jnp.ndarray , lowerCAmelCase : jnp.ndarray , lowerCAmelCase : jnp.ndarray , )-> jnp.ndarray:
"""simple docstring"""
return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] , lowerCAmelCase : DDPMSchedulerState , lowerCAmelCase : jnp.ndarray , lowerCAmelCase : jnp.ndarray , lowerCAmelCase : jnp.ndarray , )-> jnp.ndarray:
"""simple docstring"""
return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __len__( self : str )-> str:
"""simple docstring"""
return self.config.num_train_timesteps
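# --- Hedged usage sketch (illustrative addition). The class above is the
# obfuscated form of diffusers' FlaxDDPMScheduler; the zero "model output"
# below is a stand-in assumption where a real denoising model would go.
import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=10)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 8, 8))  # dummy latent
for t in state.timesteps:
    model_output = jnp.zeros_like(sample)  # placeholder for a model prediction
    out = scheduler.step(state, model_output, t, sample, key=key)
    sample, state = out.prev_sample, out.state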
| 712
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
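# --- Hedged worked example (illustrative addition). With the defaults above
# (24 kHz sampling rate, upsampling ratios [8, 5, 4, 2], top target bandwidth
# 24.0 kbps), the derived properties evaluate to:
#   hop_length     = 8 * 5 * 4 * 2            = 320 samples per frame
#   frame_rate     = ceil(24000 / 320)        = 75 frames per second
#   num_quantizers = 1000 * 24.0 // (75 * 10) = 32 codebooks
import math
import numpy as np

hop_length = int(np.prod([8, 5, 4, 2]))
frame_rate = math.ceil(24000 / hop_length)
num_quantizers = int(1000 * 24.0 // (frame_rate * 10))
assert (hop_length, frame_rate, num_quantizers) == (320, 75, 32)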
| 50
| 0
|
'''simple docstring'''
from statistics import mean
import numpy as np
def lowerCamelCase__ ( A : list , A : list , A : list , A : int ):
'''simple docstring'''
UpperCAmelCase = 0
# Number of processes finished
UpperCAmelCase = 0
# Flags marking which processes have finished.
# 0 means the process has not finished yet, 1 means it has completed.
UpperCAmelCase = [0] * no_of_process
# List to store the computed turn around times
UpperCAmelCase = [0] * no_of_process
# Sort by arrival time.
UpperCAmelCase = [burst_time[i] for i in np.argsort(A )]
UpperCAmelCase = [process_name[i] for i in np.argsort(A )]
arrival_time.sort()
while no_of_process > finished_process_count:
UpperCAmelCase = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
UpperCAmelCase = arrival_time[i]
UpperCAmelCase = 0
# Index showing the location of the process being performed
UpperCAmelCase = 0
# Saves the current response ratio.
UpperCAmelCase = 0
for i in range(0 , A ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
UpperCAmelCase = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
if response_ratio < temp:
UpperCAmelCase = temp
UpperCAmelCase = i
# Calculate the turn around time
UpperCAmelCase = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
UpperCAmelCase = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def lowerCamelCase__ ( A : list , A : list , A : list , A : int ):
'''simple docstring'''
UpperCAmelCase = [0] * no_of_process
for i in range(0 , A ):
UpperCAmelCase = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
_lowercase : int = 5
_lowercase : List[str] = ["""A""", """B""", """C""", """D""", """E"""]
_lowercase : Tuple = [1, 2, 3, 4, 5]
_lowercase : Union[str, Any] = [1, 2, 3, 4, 5]
_lowercase : int = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_lowercase : List[str] = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 713
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Any = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
def lowerCamelCase__ ( A : List[Any] , A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = [0 for i in range(r + 1 )]
# nc0 = 1
UpperCAmelCase = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
UpperCAmelCase = min(A , A )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
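# --- Hedged worked check (illustrative addition). The Pascal's-rule
# recurrence above computes C(n, r); the call printed above evaluates
# C(10, 5) = 10! / (5! * 5!) = 252, which matches the standard library:
import math
assert math.comb(10, 5) == 252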
| 714
|
'''simple docstring'''
import heapq
def lowerCamelCase__ ( A : dict ):
'''simple docstring'''
UpperCAmelCase = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(A , [-1 * len(A ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase = heapq.heappop(A )[1][0]
chosen_vertices.add(A )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
UpperCAmelCase = elem[1][1].index(A )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(A )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 50
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
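# --- Hedged usage sketch (illustrative addition). The class above is the
# obfuscated form of transformers' CLIPSegProcessor; the checkpoint name is
# an assumption for this example and downloading it requires network access.
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((352, 352, 3), dtype=np.uint8))
# Text prompts go through the tokenizer, images through the image processor:
inputs = processor(text=["a cat", "a dog"], images=[image, image], return_tensors="pt")
# inputs now holds input_ids, attention_mask and pixel_values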
| 715
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase : Optional[int] = """examples/"""
_lowercase : str = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase : List[Any] = """README.md"""
def lowerCamelCase__ ( A : int , A : str , A : Optional[Any] ):
'''simple docstring'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , A )
UpperCAmelCase = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(A , A ) , A , pattern='''examples''' )
def lowerCamelCase__ ( A : str , A : Dict=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A , A , A )
if not patch:
update_version_in_examples(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(A ).groups()[0]
return packaging.version.parse(A )
def lowerCamelCase__ ( A : Tuple=False ):
'''simple docstring'''
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(A ) == 0:
UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(A , patch=A )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = get_version()
UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(A ) == 0:
UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(A )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
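# --- Hedged worked example (illustrative addition). How the entry points
# above transform a version string, assuming the current __version__ is
# "4.30.0.dev0":
#   pre_release_work()           -> proposes the release version "4.30.0"
#   pre_release_work(patch=True) -> raises (can't patch from a dev version)
# and assuming an already released "4.30.0":
#   pre_release_work(patch=True) -> proposes the patch version "4.30.1"
#   post_release_work()          -> proposes the next dev version "4.31.0.dev0"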
| 50
| 0
|
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 500
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase ) as mock_head:
UpperCAmelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def a__( self : int )-> Tuple:
"""simple docstring"""
UpperCAmelCase = mock.Mock()
UpperCAmelCase = 500
UpperCAmelCase = {}
UpperCAmelCase = HTTPError
UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase ) as mock_head:
UpperCAmelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' )
# This check we did call the fake head request
mock_head.assert_called()
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
try:
UpperCAmelCase = tempfile.mktemp()
with open(lowerCAmelCase , '''wb''' ) as f:
http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' , lowerCAmelCase )
UpperCAmelCase = AlbertTokenizer.from_pretrained(lowerCAmelCase )
finally:
os.remove(lowerCAmelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('''tokenizer.json''' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('''tokenizer.json''' , '''wb''' ) as f:
http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''' , lowerCAmelCase )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('''tokenizer.json''' )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' )
@is_staging_test
class UpperCamelCase__( unittest.TestCase ):
__magic_name__ : List[str] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def a__( cls : Optional[int] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def a__( cls : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-tokenizer''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-tokenizer-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-tokenizer''' )
except HTTPError:
pass
def a__( self : Optional[Any] )-> Any:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowerCAmelCase , '''vocab.txt''' )
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = BertTokenizer(lowerCAmelCase )
tokenizer.push_to_hub('''test-tokenizer''' , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''test-tokenizer''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCAmelCase , repo_id='''test-tokenizer''' , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowerCAmelCase , '''vocab.txt''' )
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = BertTokenizer(lowerCAmelCase )
tokenizer.push_to_hub('''valid_org/test-tokenizer-org''' , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-tokenizer-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCAmelCase , repo_id='''valid_org/test-tokenizer-org''' , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
UpperCAmelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def a__( self : int )-> Dict:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowerCAmelCase , '''vocab.txt''' )
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = CustomTokenizer(lowerCAmelCase )
# No fast custom tokenizer
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
UpperCAmelCase = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = os.path.join(lowerCAmelCase , '''vocab.txt''' )
with open(lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
UpperCAmelCase = BertTokenizerFast.from_pretrained(lowerCAmelCase )
bert_tokenizer.save_pretrained(lowerCAmelCase )
UpperCAmelCase = CustomTokenizerFast.from_pretrained(lowerCAmelCase )
tokenizer.push_to_hub('''test-dynamic-tokenizer''' , use_auth_token=self._token )
UpperCAmelCase = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizerFast''' )
UpperCAmelCase = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=lowerCAmelCase , trust_remote_code=lowerCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , '''CustomTokenizer''' )
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''Hello 友達''' )
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
trie.add('''Hello''' )
trie.data
self.assertEqual(trie.data , {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = Trie()
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS] This is a extra_id_100'''] )
trie.add('''[CLS]''' )
trie.add('''extra_id_1''' )
trie.add('''extra_id_100''' )
self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ) , ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''A''' )
self.assertEqual(trie.split('''ABC''' ) , ['''A''', '''BC'''] )
self.assertEqual(trie.split('''BCA''' ) , ['''BC''', '''A'''] )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''TOKEN]''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''A''' )
trie.add('''P''' )
trie.add('''[SPECIAL_TOKEN]''' )
self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ) , ['''This is something ''', '''[SPECIAL_TOKEN]'''] )
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''AB''' )
trie.add('''B''' )
trie.add('''C''' )
self.assertEqual(trie.split('''ABC''' ) , ['''AB''', '''C'''] )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = Trie()
trie.add('''ABC''' )
trie.add('''B''' )
trie.add('''CD''' )
self.assertEqual(trie.split('''ABCD''' ) , ['''ABC''', '''D'''] )
def a__( self : Optional[int] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = Trie()
UpperCAmelCase = trie.cut_text('''ABC''' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCAmelCase , ['''AB''', '''C'''] )
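# --- Hedged usage sketch (illustrative addition, mirroring the tests above).
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
# The longest registered token wins at each position; untouched text between
# matches is kept as-is:
print(trie.split("[CLS] This is a extra_id_100"))
# -> ['[CLS]', ' This is a ', 'extra_id_100']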
| 716
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
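# --- Hedged usage sketch (illustrative addition, mirroring the tests above;
# the tensor shapes are assumptions). A MaxNewTokensCriteria with
# start_length=5 and max_new_tokens=5 behaves like a max_length of 10:
import torch
from transformers.generation import MaxLengthCriteria, MaxNewTokensCriteria

input_ids = torch.zeros((3, 9), dtype=torch.long)  # batch of 3, length 9
scores = torch.zeros((3, 9))
assert not MaxLengthCriteria(max_length=10)(input_ids, scores)  # 9 < 10
assert not MaxNewTokensCriteria(start_length=5, max_new_tokens=5)(input_ids, scores)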
| 50
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Tuple , lowerCAmelCase : str )-> Optional[int]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
UpperCAmelCase = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(lowerCAmelCase )
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , only_pretrain_model=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , torchscript=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def a__( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , fpaa=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase )
# set architectures equal to `None`
UpperCAmelCase = None
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowerCAmelCase , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a__( self : Dict )-> str:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase )
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tinier_bart'''
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase )
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase )
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a__( self : int )-> Any:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tinier_bart'''
UpperCAmelCase = AutoConfig.from_pretrained(lowerCAmelCase )
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , save_to_csv=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCAmelCase , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(lowerCAmelCase , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(lowerCAmelCase , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(lowerCAmelCase , '''train_time.csv''' ) , env_info_csv_file=os.path.join(lowerCAmelCase , '''env.csv''' ) , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCAmelCase , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCAmelCase , '''env.csv''' ) ).exists() )
def a__( self : Dict )-> Tuple:
"""simple docstring"""
UpperCAmelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(lowerCAmelCase : Optional[Any] ):
self.assertTrue(hasattr(lowerCAmelCase , '''sequential''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''cumulative''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''current''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCAmelCase , inference=lowerCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCAmelCase , '''log.txt''' ) , log_print=lowerCAmelCase , trace_memory_line_by_line=lowerCAmelCase , multi_process=lowerCAmelCase , )
UpperCAmelCase = PyTorchBenchmark(lowerCAmelCase )
UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowerCAmelCase , '''log.txt''' ) ).exists() )
| 717
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 50
| 0
|
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = (DDIMParallelScheduler,)
__magic_name__ : Optional[int] = (("eta", 0.0), ("num_inference_steps", 50))
def a__( self : str , **lowerCAmelCase : Dict )-> Dict:
"""simple docstring"""
UpperCAmelCase = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**lowerCAmelCase )
return config
def a__( self : Dict , **lowerCAmelCase : int )-> int:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(**lowerCAmelCase )
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = 10, 0.0
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase )
for t in scheduler.timesteps:
UpperCAmelCase = model(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
return sample
def a__( self : Dict )-> Tuple:
"""simple docstring"""
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase )
def a__( self : int )-> List[Any]:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase )
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def a__( self : Dict )-> Tuple:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase )
def a__( self : int )-> List[str]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase )
def a__( self : Any )-> str:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase )
def a__( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase )
def a__( self : Dict )-> Any:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase )
def a__( self : Optional[int] )-> str:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase )
def a__( self : str )-> int:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , )
def a__( self : Optional[int] )-> List[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase )
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase )
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase )
def a__( self : int )-> Any:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = self.dummy_sample_deter + 0.1
UpperCAmelCase = self.dummy_sample_deter - 0.1
UpperCAmelCase = samplea.shape[0]
UpperCAmelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCAmelCase = torch.arange(lowerCAmelCase )[0:3, None].repeat(1 , lowerCAmelCase )
UpperCAmelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCAmelCase = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowerCAmelCase )
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def a__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.full_loop()
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def a__( self : Optional[Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01 )
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def a__( self : List[str] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01 )
UpperCAmelCase = torch.sum(torch.abs(lowerCAmelCase ) )
UpperCAmelCase = torch.mean(torch.abs(lowerCAmelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 718
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise a PyTorch model from the JSON config
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
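# Usage sketch (script name and local paths are hypothetical; pass --is_trivia_qa only
# for TriviaQA-finetuned checkpoints):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird/model.ckpt-0 \
#       --big_bird_config_file ./bigbird/config.json \
#       --pytorch_dump_path ./bigbird-pytorch \
#       --is_trivia_qa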
| 50
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
_lowercase : Dict = """docs/source/en/_toctree.yml"""
def lowerCamelCase__ ( A : Any ):
UpperCAmelCase = defaultdict(A )
UpperCAmelCase = []
UpperCAmelCase = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(A )
UpperCAmelCase = new_doc_list
UpperCAmelCase = [key for key, value in counts.items() if value > 1]
UpperCAmelCase = []
for duplicate_key in duplicates:
UpperCAmelCase = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(A ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
UpperCAmelCase = sorted(A , key=lambda A : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(A ) > 1:
raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' )
overview_doc.extend(A )
# Sort
return overview_doc
def lowerCamelCase__ ( A : Tuple=False ):
with open(A , encoding='''utf-8''' ) as f:
UpperCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase = content[api_idx]['''sections''']
# Then to the model doc
UpperCAmelCase = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
UpperCAmelCase = api_doc[scheduler_idx]['''sections''']
UpperCAmelCase = clean_doc_toc(A )
UpperCAmelCase = False
if new_scheduler_doc != scheduler_doc:
UpperCAmelCase = True
if overwrite:
UpperCAmelCase = new_scheduler_doc
if diff:
if overwrite:
UpperCAmelCase = api_doc
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(A , allow_unicode=A ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def lowerCamelCase__ ( A : Optional[Any]=False ):
with open(A , encoding='''utf-8''' ) as f:
UpperCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase = content[api_idx]['''sections''']
# Then to the model doc
UpperCAmelCase = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
UpperCAmelCase = False
UpperCAmelCase = api_doc[pipeline_idx]['''sections''']
UpperCAmelCase = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
UpperCAmelCase = pipeline_doc['''section''']
UpperCAmelCase = clean_doc_toc(A )
if overwrite:
UpperCAmelCase = new_sub_pipeline_doc
new_pipeline_docs.append(A )
# sort overall pipeline doc
UpperCAmelCase = clean_doc_toc(A )
if new_pipeline_docs != pipeline_docs:
UpperCAmelCase = True
if overwrite:
UpperCAmelCase = new_pipeline_docs
if diff:
if overwrite:
UpperCAmelCase = api_doc
with open(A , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(A , allow_unicode=A ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowercase : Optional[Any] = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
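# Usage sketch, assuming the script lives at utils/check_doc_toc.py and runs from the repo root:
#   python utils/check_doc_toc.py                      # raise if the ToC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place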
| 719
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(lowerCAmelCase ) # fails here
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(3 )
UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
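# A minimal consumption sketch (not part of this test suite): DisjunctiveConstraint is
# meant for constrained beam search, forcing the output to contain one of the candidate
# token-id sequences, e.g.
#   model.generate(input_ids, constraints=[DisjunctiveConstraint([[1, 2, 4], [1, 2, 3, 4]])], num_beams=4)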
| 50
| 0
|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
_lowercase : List[Any] = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
_lowercase : List[str] = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
_lowercase : Tuple = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
_lowercase : int = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
_lowercase : Optional[Any] = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
_lowercase : str = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
_lowercase : Dict = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
_lowercase : Optional[int] = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
_lowercase : Optional[Any] = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
_lowercase : Optional[int] = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
_lowercase : Dict = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
_lowercase : List[str] = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
_lowercase : Optional[Any] = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
_lowercase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowercase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowercase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowercase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowercase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowercase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowercase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowercase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowercase : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowercase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowercase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowercase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowercase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowercase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : Tuple = FLAX_MODEL_MAPPING
_lowercase : Optional[int] = auto_class_update(FlaxAutoModel)
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : Dict = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowercase : Dict = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowercase : Optional[int] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : str = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowercase : Tuple = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : Tuple = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowercase : Dict = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : Tuple = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowercase : List[Any] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowercase : Tuple = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : Dict = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowercase : str = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : List[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowercase : Union[str, Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : Optional[Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowercase : Optional[int] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : Any = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowercase : List[Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : Tuple = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowercase : List[Any] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class UpperCamelCase__( _BaseAutoModelClass ):
__magic_name__ : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowercase : List[Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
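# Usage sketch (assumes flax is installed and the checkpoint exists on the Hub):
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")
# The base mapping above routes the "bert" config type to FlaxBertModel; the task-specific
# auto classes defined here dispatch through their own mapping in the same way.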
| 720
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCamelCase__( lowerCAmelCase ):
@staticmethod
def a__( lowerCAmelCase : ArgumentParser )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=lowerCAmelCase , default=lowerCAmelCase , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=lowerCAmelCase , help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : bool )-> Any:
"""simple docstring"""
UpperCAmelCase = model
UpperCAmelCase = cache
UpperCAmelCase = force
UpperCAmelCase = trust_remote_code
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
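# Usage sketch, matching the parser registered above:
#   transformers-cli download --cache-dir ./models bert-base-uncased
# which simply pre-populates the local cache via the two from_pretrained calls in the
# method above.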
| 50
| 0
|
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Returns every way `target` can be built by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
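# Tiny worked example: all_construct("aa", ["a"]) seeds table[0] = [[]], builds
# table[1] = [["a"]] and table[2] = [["a", "a"]], and returns [["a", "a"]].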
| 721
|
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Converts an Excel-style column title (e.g. "AB") to its column number (e.g. 28)."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
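# Worked example: excel_title_to_column("AB") = (ord("A") - 64) * 26 + (ord("B") - 64) = 28.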
| 50
| 0
|
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    """Recursively converts a non-negative integer to its binary digit string."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validates the input string and prefixes the sign and the "0b" marker."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
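# Examples: main("7") returns "0b111" and main("-2") returns "-0b10".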
| 700
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]="relu" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ : Optional[int] = False
__magic_name__ : List[str] = False
__magic_name__ : Dict = False
def a__( self : Union[str, Any] )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
pass
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Dict )-> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
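# Usage sketch mirroring the tests above (weights fetched from the Hub):
#   model = FlaxRegNetModel.from_pretrained("facebook/regnet-y-040")
#   out = model(pixel_values, output_hidden_states=True)
# len(out.hidden_states) is num_stages + 1: one entry per stage plus the embedding output.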
| 50
| 0
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
_lowercase : List[str] = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
_lowercase : Dict = {"""facebook/blenderbot-3B""": 128}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : int = VOCAB_FILES_NAMES
__magic_name__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Any = ["input_ids", "attention_mask"]
__magic_name__ : Tuple = BlenderbotTokenizer
def __init__( self : Tuple , lowerCAmelCase : List[Any]=None , lowerCAmelCase : int=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : int="replace" , lowerCAmelCase : List[Any]="<s>" , lowerCAmelCase : Optional[Any]="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : int="<s>" , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Tuple="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Tuple=False , lowerCAmelCase : Optional[int]=True , **lowerCAmelCase : int , )-> Dict:
"""simple docstring"""
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowerCAmelCase ) != add_prefix_space:
UpperCAmelCase = getattr(lowerCAmelCase , pre_tok_state.pop('''type''' ) )
UpperCAmelCase = add_prefix_space
UpperCAmelCase = pre_tok_class(**lowerCAmelCase )
UpperCAmelCase = add_prefix_space
UpperCAmelCase = '''post_processor'''
UpperCAmelCase = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase )
if tokenizer_component_instance:
UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase = tuple(state['''cls'''] )
UpperCAmelCase = False
if state.get('''add_prefix_space''' , lowerCAmelCase ) != add_prefix_space:
UpperCAmelCase = add_prefix_space
UpperCAmelCase = True
if state.get('''trim_offsets''' , lowerCAmelCase ) != trim_offsets:
UpperCAmelCase = trim_offsets
UpperCAmelCase = True
if changes_to_apply:
UpperCAmelCase = getattr(lowerCAmelCase , state.pop('''type''' ) )
UpperCAmelCase = component_class(**lowerCAmelCase )
setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def a__( self : int )-> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def a__( self : Union[str, Any] , lowerCAmelCase : Dict )-> str:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else value
UpperCAmelCase = value
def a__( self : str , *lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Dict )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Tuple , *lowerCAmelCase : Dict , **lowerCAmelCase : Any )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = kwargs.get('''is_split_into_words''' , lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
UpperCAmelCase = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
def a__( self : List[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def a__( self : List[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def a__( self : Optional[int] , lowerCAmelCase : "Conversation" )-> List[int]:
"""simple docstring"""
UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCAmelCase )
UpperCAmelCase = ''' '''.join(lowerCAmelCase )
UpperCAmelCase = self.encode(lowerCAmelCase )
if len(lowerCAmelCase ) > self.model_max_length:
UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
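# Usage sketch (upstream this class is published as BlenderbotTokenizerFast):
#   tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tok("Hello there!").input_ids  # build_inputs_with_special_tokens appends eos only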
| 701
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = """▁"""
_lowercase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowercase : Any = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowercase : int = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
_lowercase : int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
__magic_name__ : List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
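# Usage sketch (upstream this class is published as MBart50Tokenizer; checkpoint name
# taken from the pretrained map above):
#   tok = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50-one-to-many-mmt",
#                                          src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
# Source text is encoded as [src_lang_code] X [eos], per set_src_lang_special_tokens above.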
| 50
| 0
|
def hamming_distance(string1: str, string2: str) -> int:
    """Number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
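# Classic example: hamming_distance("karolin", "kathrin") == 3.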
| 702
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow logging noise
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 50
| 0
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    # Yahoo Finance marks the live quote with this CSS class
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
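# Caveat: this scrapes a hard-coded Yahoo Finance CSS class, so it silently breaks when
# the page markup changes; for anything beyond a demo, prefer a maintained quotes API.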
| 703
|
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
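# Worked example: mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11, i.e. a 1-day
# pass on day 1, a 7-day pass covering days 4-8, and a 1-day pass on day 20.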
| 50
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__( unittest.TestCase ):
@property
def a__( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.dummy_uncond_unet
UpperCAmelCase = PNDMScheduler()
UpperCAmelCase = PNDMPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pndm.to(lowerCAmelCase )
pndm.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pndm(generator=lowerCAmelCase , num_inference_steps=20 , output_type='''numpy''' ).images
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pndm(generator=lowerCAmelCase , num_inference_steps=20 , output_type='''numpy''' , return_dict=lowerCAmelCase )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''google/ddpm-cifar10-32'''
UpperCAmelCase = UNetaDModel.from_pretrained(lowerCAmelCase )
UpperCAmelCase = PNDMScheduler()
UpperCAmelCase = PNDMPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
pndm.to(lowerCAmelCase )
pndm.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pndm(generator=lowerCAmelCase , output_type='''numpy''' ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Any = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
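The `_LazyModule` machinery above defers the torch-dependent imports until an attribute is first accessed. A self-contained sketch of the same idea follows (a hypothetical helper, not the transformers implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported symbols from their submodules on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol back to the submodule that defines it.
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        submodule = importlib.import_module('.' + self._symbol_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value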
'''simple docstring'''
def permute(nums: list[int]):
    '''simple docstring'''
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permutea(nums: list[int]):
    '''simple docstring'''

    def backtrack(start: int):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data produced by the backtracking version (permutea)
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
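Both functions enumerate all n! orderings in O(n * n!) time; only the order of the output differs. As a quick sanity check (the exact ordering is an implementation detail):

# permutea([1, 2, 3]) ->
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]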
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple=1 ):
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def lowerCamelCase__ ( A : int , A : Optional[Any]=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item.replace('''in_layers.0''' , '''norm1''' )
UpperCAmelCase = new_item.replace('''in_layers.2''' , '''conv1''' )
UpperCAmelCase = new_item.replace('''out_layers.0''' , '''norm2''' )
UpperCAmelCase = new_item.replace('''out_layers.3''' , '''conv2''' )
UpperCAmelCase = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
UpperCAmelCase = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Any , A : int=0 ):
'''simple docstring'''
UpperCAmelCase = []
for old_item in old_list:
UpperCAmelCase = old_item
UpperCAmelCase = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
UpperCAmelCase = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
UpperCAmelCase = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
UpperCAmelCase = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
UpperCAmelCase = shave_segments(A , n_shave_prefix_segments=A )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def lowerCamelCase__ ( A : Tuple , A : Union[str, Any] , A : int , A : Dict=None , A : Optional[int]=None , A : Optional[Any]=None ):
'''simple docstring'''
assert isinstance(A , A ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCAmelCase = old_checkpoint[path]
UpperCAmelCase = old_tensor.shape[0] // 3
UpperCAmelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCAmelCase = old_tensor.shape[0] // config['''num_head_channels'''] // 3
UpperCAmelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = old_tensor.split(channels // num_heads , dim=1 )
UpperCAmelCase = query.reshape(A )
UpperCAmelCase = key.reshape(A )
UpperCAmelCase = value.reshape(A )
for path in paths:
UpperCAmelCase = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCAmelCase = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
UpperCAmelCase = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
UpperCAmelCase = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCAmelCase = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCAmelCase = old_checkpoint[path['''old''']][:, :, 0]
else:
UpperCAmelCase = old_checkpoint[path['''old''']]
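# Shape illustration for the qkv split performed above (hypothetical sizes, not from the source):
# a fused attention tensor with 3 * C output channels is viewed per attention head and then
# split along dim=1 into query / key / value chunks. For C = 64 and num_head_channels = 8:
#   fused.shape == (192, ...)                                # q, k, v stacked: 3 * C rows
#   per_head = fused.reshape((8, 24) + fused.shape[1:])      # num_heads = 192 // 8 // 3 = 8
#   q, k, v = per_head.split(64 // 8, dim=1)                 # three (8, 8, ...) chunks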
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = {}
UpperCAmelCase = checkpoint['''time_embed.0.weight''']
UpperCAmelCase = checkpoint['''time_embed.0.bias''']
UpperCAmelCase = checkpoint['''time_embed.2.weight''']
UpperCAmelCase = checkpoint['''time_embed.2.bias''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase = checkpoint['''out.0.weight''']
UpperCAmelCase = checkpoint['''out.0.bias''']
UpperCAmelCase = checkpoint['''out.2.weight''']
UpperCAmelCase = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the middle blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(A )
}
# Retrieves the keys for the output blocks only
UpperCAmelCase = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
UpperCAmelCase = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(A )
}
for i in range(1 , A ):
UpperCAmelCase = (i - 1) // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = (i - 1) % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCAmelCase = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCAmelCase = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path, resnet_op] , config=A )
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=A , config=A , )
UpperCAmelCase = middle_blocks[0]
UpperCAmelCase = middle_blocks[1]
UpperCAmelCase = middle_blocks[2]
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_resnet_paths(A )
assign_to_checkpoint(A , A , A , config=A )
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
A , A , A , attention_paths_to_split=A , config=A )
for i in range(A ):
UpperCAmelCase = i // (config['''num_res_blocks'''] + 1)
UpperCAmelCase = i % (config['''num_res_blocks'''] + 1)
UpperCAmelCase = [shave_segments(A , 2 ) for name in output_blocks[i]]
UpperCAmelCase = {}
for layer in output_block_layers:
UpperCAmelCase , UpperCAmelCase = layer.split('''.''' )[0], shave_segments(A , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A )
else:
UpperCAmelCase = [layer_name]
if len(A ) > 1:
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCAmelCase = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = renew_resnet_paths(A )
UpperCAmelCase = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(A , A , A , additional_replacements=[meta_path] , config=A )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCAmelCase = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCAmelCase = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(A ) == 2:
UpperCAmelCase = []
if len(A ):
UpperCAmelCase = renew_attention_paths(A )
UpperCAmelCase = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCAmelCase = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A , A , A , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=A , )
else:
UpperCAmelCase = renew_resnet_paths(A , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCAmelCase = '''.'''.join(['''output_blocks''', str(A ), path['''old''']] )
UpperCAmelCase = '''.'''.join(['''up_blocks''', str(A ), '''resnets''', str(A ), path['''new''']] )
UpperCAmelCase = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowercase : Dict = parser.parse_args()
_lowercase : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
_lowercase : List[str] = json.loads(f.read())
_lowercase : Union[str, Any] = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
_lowercase : Any = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
_lowercase : Tuple = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
_lowercase : Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
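A hypothetical invocation of this conversion script (the script name and all paths are placeholders, not taken from the source):

#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model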
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
UpperCAmelCase = 3_84
if "tiny" in model_name:
UpperCAmelCase = [3, 3, 9, 3]
UpperCAmelCase = [96, 1_92, 3_84, 7_68]
if "small" in model_name:
UpperCAmelCase = [3, 3, 27, 3]
UpperCAmelCase = [96, 1_92, 3_84, 7_68]
if "base" in model_name:
UpperCAmelCase = [3, 3, 27, 3]
UpperCAmelCase = [1_28, 2_56, 5_12, 10_24]
UpperCAmelCase = 5_12
if "large" in model_name:
UpperCAmelCase = [3, 3, 27, 3]
UpperCAmelCase = [1_92, 3_84, 7_68, 15_36]
UpperCAmelCase = 7_68
if "xlarge" in model_name:
UpperCAmelCase = [3, 3, 27, 3]
UpperCAmelCase = [2_56, 5_12, 10_24, 20_48]
UpperCAmelCase = 10_24
# set label information
UpperCAmelCase = 1_50
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''ade20k-id2label.json'''
UpperCAmelCase = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase = {int(A ): v for k, v in idalabel.items()}
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = ConvNextConfig(
depths=A , hidden_sizes=A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
UpperCAmelCase = UperNetConfig(
backbone_config=A , auxiliary_in_channels=A , num_labels=A , idalabel=A , labelaid=A , )
return config
def lowerCamelCase__ ( A : Tuple ):
'''simple docstring'''
UpperCAmelCase = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") )
rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") )
if i > 0:
rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") )
rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def lowerCamelCase__ ( A : Optional[Any] , A : List[Any] , A : Any ):
'''simple docstring'''
UpperCAmelCase = dct.pop(A )
UpperCAmelCase = val
def lowerCamelCase__ ( A : Union[str, Any] , A : Union[str, Any] , A : str ):
'''simple docstring'''
UpperCAmelCase = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
UpperCAmelCase = model_name_to_url[model_name]
UpperCAmelCase = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''state_dict''']
UpperCAmelCase = get_upernet_config(A )
UpperCAmelCase = UperNetForSemanticSegmentation(A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase = state_dict.pop(A )
if "bn" in key:
UpperCAmelCase = key.replace('''bn''' , '''batch_norm''' )
UpperCAmelCase = val
# rename keys
UpperCAmelCase = create_rename_keys(A )
for src, dest in rename_keys:
rename_key(A , A , A )
model.load_state_dict(A )
# verify on image
UpperCAmelCase = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
UpperCAmelCase = Image.open(requests.get(A , stream=A ).raw ).convert('''RGB''' )
UpperCAmelCase = SegformerImageProcessor()
UpperCAmelCase = processor(A , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
UpperCAmelCase = model(A )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase = torch.tensor(
[[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase = torch.tensor(
[[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase = torch.tensor(
[[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase = torch.tensor(
[[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase = torch.tensor(
[[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(A )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowercase : int = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
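A hypothetical invocation (the script file name and output path are placeholders; the model name must be one of the choices declared above):

#   python convert_upernet_checkpoint.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny \
#       --push_to_hub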
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = "dpr"
def __init__( self : Dict , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Tuple="absolute" , lowerCAmelCase : int = 0 , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = projection_dim
UpperCAmelCase = position_embedding_type
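Assuming the class above corresponds to transformers' `DPRConfig` (the original identifiers are mangled here), a minimal usage sketch:

from transformers import DPRConfig, DPRQuestionEncoder

config = DPRConfig(hidden_size=768, projection_dim=128)
model = DPRQuestionEncoder(config)  # pooled output is projected down to 128 dimensions
print(model.config.projection_dim)  # 128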
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """simple docstring"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int):
    '''simple docstring'''
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int):
    '''simple docstring'''
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
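Any object exposing `process(sample: float) -> float` satisfies the `FilterType` protocol; a trivial pass-through filter gives flat magnitude and phase plots:

class PassThrough:
    def process(self, sample: float) -> float:
        return sample


# Example usage (opens matplotlib windows):
#   show_frequency_response(PassThrough(), 48000)  # flat 0 dB line
#   show_phase_response(PassThrough(), 48000)      # flat 0 rad line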
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : Tuple = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCamelCase__ ( A : Any , A : str ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
elif args.student_type == "gpt2":
UpperCAmelCase = False
def lowerCamelCase__ ( A : List[Any] , A : List[str] ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=A , required=A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
        '''--data_file''' , type=A , required=A , help='''The binarized file (tokenized + tokens_to_ids), grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=A , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=A , required=A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=A , type=A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=A , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=A , required=A , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=A , help='''Temperature applied to the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=A , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=A , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=A , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=A , default=40_00 , help='''Checkpoint interval.''' )
UpperCAmelCase = parser.parse_args()
sanity_checks(A )
# ARGS #
init_gpu_params(A )
set_seed(A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(A ) , A , indent=4 )
git_log(args.dump_path )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.student_type]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase = tokenizer.all_special_tokens.index(A )
UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase = special_tok_ids
UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
UpperCAmelCase = np.maximum(A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase = 0.0 # do not predict special tokens
UpperCAmelCase = torch.from_numpy(A )
else:
UpperCAmelCase = None
UpperCAmelCase = LmSeqsDataset(params=A , data=A )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=A )
else:
UpperCAmelCase = student_model_class(A )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A , A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A , A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase = Distiller(
params=A , dataset=A , token_probs=A , student=A , teacher=A )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
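A hypothetical command line for this trainer (file names and paths are placeholders; the flags are chosen to satisfy `sanity_checks`, e.g. `--alpha_clm 0.0` because `--mlm` is set):

#   python train.py \
#       --student_type distilbert --student_config student_config.json \
#       --teacher_type bert --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_ce 0.5 --alpha_clm 0.0 \
#       --token_counts token_counts.pkl --data_file binarized_data.pkl \
#       --dump_path ./serialization_dir --force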
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
# TODO Update this
_lowercase : int = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = "esm"
def __init__( self : Optional[Any] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Any=768 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Dict=12 , lowerCAmelCase : List[Any]=3072 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : Any=1026 , lowerCAmelCase : Optional[Any]=0.02 , lowerCAmelCase : Optional[int]=1E-12 , lowerCAmelCase : List[str]="absolute" , lowerCAmelCase : int=True , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , **lowerCAmelCase : Optional[int] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , mask_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
UpperCAmelCase = emb_layer_norm_before
UpperCAmelCase = token_dropout
UpperCAmelCase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('''No esmfold_config supplied for folding model, using default values.''' )
UpperCAmelCase = EsmFoldConfig()
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = EsmFoldConfig(**lowerCAmelCase )
UpperCAmelCase = esmfold_config
if vocab_list is None:
logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' )
UpperCAmelCase = get_default_vocab_list()
else:
UpperCAmelCase = vocab_list
else:
UpperCAmelCase = None
UpperCAmelCase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , lowerCAmelCase ):
raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
UpperCAmelCase = super().to_dict()
if isinstance(self.esmfold_config , lowerCAmelCase ):
UpperCAmelCase = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__:
__magic_name__ : str = None
__magic_name__ : bool = True
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : bool = False
__magic_name__ : float = 0
__magic_name__ : bool = True
__magic_name__ : bool = False
__magic_name__ : int = 128
__magic_name__ : "TrunkConfig" = None
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
if self.trunk is None:
UpperCAmelCase = TrunkConfig()
elif isinstance(self.trunk , lowerCAmelCase ):
UpperCAmelCase = TrunkConfig(**self.trunk )
def a__( self : List[str] )-> Dict:
"""simple docstring"""
UpperCAmelCase = asdict(self )
UpperCAmelCase = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__:
__magic_name__ : int = 48
__magic_name__ : int = 1024
__magic_name__ : int = 128
__magic_name__ : int = 32
__magic_name__ : int = 32
__magic_name__ : int = 32
__magic_name__ : float = 0
__magic_name__ : float = 0
__magic_name__ : bool = False
__magic_name__ : int = 4
__magic_name__ : Optional[int] = 128
__magic_name__ : "StructureModuleConfig" = None
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
if self.structure_module is None:
UpperCAmelCase = StructureModuleConfig()
elif isinstance(self.structure_module , lowerCAmelCase ):
UpperCAmelCase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""" )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
UpperCAmelCase = self.sequence_state_dim // self.sequence_head_width
UpperCAmelCase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = asdict(self )
UpperCAmelCase = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__:
__magic_name__ : int = 384
__magic_name__ : int = 128
__magic_name__ : int = 16
__magic_name__ : int = 128
__magic_name__ : int = 12
__magic_name__ : int = 4
__magic_name__ : int = 8
__magic_name__ : float = 0.1
__magic_name__ : int = 8
__magic_name__ : int = 1
__magic_name__ : int = 2
__magic_name__ : int = 7
__magic_name__ : int = 10
__magic_name__ : float = 1e-8
__magic_name__ : float = 1e5
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
return asdict(self )
def lowerCamelCase__ ( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
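Assuming the first class above corresponds to transformers' `EsmConfig`, a minimal sketch (the sizes are illustrative, not taken from any released checkpoint):

from transformers import EsmConfig

config = EsmConfig(vocab_size=33, hidden_size=320, num_hidden_layers=6, num_attention_heads=20)
assert config.to_dict()['hidden_size'] == 320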
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
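This processor appears to match transformers' `CLIPSegProcessor` (a ViT image processor plus a CLIP tokenizer, with optional visual prompts). A usage sketch under that assumption; the checkpoint name is illustrative:

from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained('CIDAS/clipseg-rd64-refined')
image = Image.new('RGB', (352, 352))
# Exactly one prompt type (text OR visual) may be combined with images, as enforced in __call__.
inputs = processor(text=['a cat'], images=[image], return_tensors='pt')
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']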
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
_lowercase : Optional[int] = r"""
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `\" / \"`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `\" // \"`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated by
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `\"train\"`):
Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `\"compressed\"`):
The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and
`\"compressed\"`.
        index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a \"dummy\" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
"""
@add_start_docstrings(lowerCAmelCase )
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[Any] = "rag"
__magic_name__ : Any = True
def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Any=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=" / " , lowerCAmelCase : Dict=" // " , lowerCAmelCase : int=5 , lowerCAmelCase : Optional[Any]=300 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Optional[Any]=8 , lowerCAmelCase : int="wiki_dpr" , lowerCAmelCase : Optional[int]="train" , lowerCAmelCase : Any="compressed" , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Tuple=False , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Any=False , lowerCAmelCase : int=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=None , **lowerCAmelCase : Optional[Any] , )-> int:
"""simple docstring"""
super().__init__(
bos_token_id=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , forced_eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , prefix=lowerCAmelCase , vocab_size=lowerCAmelCase , **lowerCAmelCase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
UpperCAmelCase = kwargs.pop('''question_encoder''' )
UpperCAmelCase = question_encoder_config.pop('''model_type''' )
UpperCAmelCase = kwargs.pop('''generator''' )
UpperCAmelCase = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
UpperCAmelCase = AutoConfig.for_model(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = AutoConfig.for_model(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = reduce_loss
UpperCAmelCase = label_smoothing
UpperCAmelCase = exclude_bos_score
UpperCAmelCase = do_marginalize
UpperCAmelCase = title_sep
UpperCAmelCase = doc_sep
UpperCAmelCase = n_docs
UpperCAmelCase = max_combined_length
UpperCAmelCase = dataset
UpperCAmelCase = dataset_split
UpperCAmelCase = index_name
UpperCAmelCase = retrieval_vector_size
UpperCAmelCase = retrieval_batch_size
UpperCAmelCase = passages_path
UpperCAmelCase = index_path
UpperCAmelCase = use_dummy_dataset
UpperCAmelCase = output_retrieved
UpperCAmelCase = do_deduplication
UpperCAmelCase = use_cache
if self.forced_eos_token_id is None:
UpperCAmelCase = getattr(self.generator , '''forced_eos_token_id''' , lowerCAmelCase )
@classmethod
def a__( cls : Any , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : PretrainedConfig , **lowerCAmelCase : Optional[int] )-> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowerCAmelCase )
def a__( self : List[str] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = copy.deepcopy(self.__dict__ )
UpperCAmelCase = self.question_encoder.to_dict()
UpperCAmelCase = self.generator.to_dict()
UpperCAmelCase = self.__class__.model_type
return output
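A sketch of composing the config from its two sub-configs, assuming this mirrors transformers' `RagConfig` (model identifiers are illustrative):

from transformers import AutoConfig, RagConfig

question_encoder = AutoConfig.from_pretrained('facebook/dpr-question_encoder-single-nq-base')
generator = AutoConfig.from_pretrained('facebook/bart-large')
# __init__ above requires both sub-configs as dicts carrying a 'model_type' key.
config = RagConfig(question_encoder=question_encoder.to_dict(), generator=generator.to_dict(), n_docs=5)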
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( A : Any , A : Dict , A : str , A : Optional[Any]=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
UpperCAmelCase = os.path.abspath(A )
logger.info(f"""Loading PyTorch weights from {pt_path}""" )
UpperCAmelCase = torch.load(A , map_location='''cpu''' )
logger.info(f"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
UpperCAmelCase = convert_pytorch_state_dict_to_flax(A , A )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCAmelCase = convert_pytorch_sharded_state_dict_to_flax(A , A )
return flax_state_dict
def lowerCamelCase__ ( A : Tuple[str] , A : np.ndarray , A : Dict[str, jnp.ndarray] , A : str , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(A : Tuple[str] ) -> bool:
return len(set(A ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCAmelCase = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCAmelCase = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCAmelCase = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCAmelCase = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(A ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(A ):
UpperCAmelCase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(A ):
UpperCAmelCase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCAmelCase = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCAmelCase = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCAmelCase = pt_tuple_key[-2] + '''_v'''
if name is not None:
UpperCAmelCase = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
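# A minimal, self-contained sketch (added for illustration, not used by the
# conversion itself) of the two layout changes handled above, assuming the
# standard PyTorch and Flax parameter conventions:
def _demo_pt_to_flax_layouts():
    import numpy as np

    pt_linear = np.zeros((8, 4))  # PyTorch Linear.weight: (out_features, in_features)
    flax_kernel = pt_linear.T  # Flax Dense kernel: (in_features, out_features)
    pt_conv = np.zeros((16, 3, 5, 5))  # PyTorch Conv2d.weight: (out_ch, in_ch, kh, kw)
    flax_conv = pt_conv.transpose(2, 3, 1, 0)  # Flax Conv kernel: (kh, kw, in_ch, out_ch)
    assert flax_kernel.shape == (4, 8)
    assert flax_conv.shape == (5, 5, 3, 16)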
def lowerCamelCase__ ( A : int , A : int ):
'''simple docstring'''
UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCAmelCase = flax_model.params['''params''']
else:
UpperCAmelCase = flax_model.params
UpperCAmelCase = flatten_dict(A )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(A )
UpperCAmelCase = {}
UpperCAmelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCAmelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase , UpperCAmelCase = rename_key_and_reshape_tensor(
A , A , A , A )
# add model prefix if necessary
UpperCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCAmelCase = jnp.asarray(A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(A , A )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase = jnp.asarray(A )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase = jnp.asarray(A )
return unflatten_dict(A )
def lowerCamelCase__ ( A : Optional[int] , A : str ):
'''simple docstring'''
import torch
    # accumulate converted weights across all shards
    UpperCAmelCase = {}
    for shard_file in shard_filenames:
        # load each PyTorch shard with torch.load (these are ``.bin`` shards, not msgpack)
        UpperCAmelCase = torch.load(A )
UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase = flax_model.params['''params''']
UpperCAmelCase = flatten_dict(A )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
UpperCAmelCase = flax_model.params
UpperCAmelCase = flatten_dict(A )
UpperCAmelCase = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCAmelCase = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase , UpperCAmelCase = rename_key_and_reshape_tensor(
A , A , A , A )
# add model prefix if necessary
UpperCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCAmelCase = jnp.asarray(A )
continue
if "var" in flax_key[-1]:
UpperCAmelCase = jnp.asarray(A )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(A , A )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase = jnp.asarray(A )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase = jnp.asarray(A )
return unflatten_dict(A )
def lowerCamelCase__ ( A : List[str] , A : List[Any] ):
'''simple docstring'''
UpperCAmelCase = os.path.abspath(A )
logger.info(f"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
UpperCAmelCase = getattr(A , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(A , '''rb''' ) as state_f:
try:
UpperCAmelCase = from_bytes(A , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(A , A )
def lowerCamelCase__ ( A : Tuple , A : List[Any] ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    UpperCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , A ) ).values()
if any(A ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
UpperCAmelCase = jax.tree_util.tree_map(
        lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , A )
UpperCAmelCase = flatten_dict(A )
UpperCAmelCase = pt_model.state_dict()
UpperCAmelCase = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
UpperCAmelCase = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCAmelCase = []
UpperCAmelCase = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCAmelCase = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(A ) not in pt_model_dict:
# conv layer
UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
UpperCAmelCase = jnp.transpose(A , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(A ) not in pt_model_dict:
# linear layer
UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
UpperCAmelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCAmelCase = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
UpperCAmelCase = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
UpperCAmelCase = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCAmelCase = '''.'''.join(A )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCAmelCase = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCAmelCase = key.split('''.''' )
UpperCAmelCase = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCAmelCase = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCAmelCase = key_components[-2] + '''_v'''
if name is not None:
UpperCAmelCase = key_components[:-3] + [name]
UpperCAmelCase = '''.'''.join(A )
UpperCAmelCase = key
if flax_key in special_pt_names:
UpperCAmelCase = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
f"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
UpperCAmelCase = np.asarray(A ) if not isinstance(A , np.ndarray ) else flax_tensor
UpperCAmelCase = torch.from_numpy(A )
# remove from missing keys
missing_keys.remove(A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(A )
pt_model.load_state_dict(A )
# re-transform missing_keys to list
UpperCAmelCase = list(A )
if len(A ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
f""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(f"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(A ) > 0:
logger.warning(
f"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
f""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
''' use it for predictions and inference.''' )
else:
logger.warning(
f"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
'''If your task is similar to the task the model of the checkpoint was trained on, '''
f"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
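# Typical entry point for the conversion above (model id is hypothetical and
# shown only for illustration):
#
#     from transformers import BertModel
#     pt_model = BertModel.from_pretrained("some-org/some-flax-checkpoint", from_flax=True)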
| 710
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
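        # e.g. a requested batch size of 64 becomes 4 accumulation steps with
        # micro-batches of 16, so the effective batch size is unchanged.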
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
        '''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=A , default='''logs''' , help='''Location to store experiment tracking logs and relevant project information''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
| 50
| 0
|
'''simple docstring'''
import os
def lowerCamelCase__ ( A : str = "input.txt" ):
'''simple docstring'''
with open(os.path.join(os.path.dirname(A ) , A ) ) as input_file:
UpperCAmelCase = [
[int(A ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
UpperCAmelCase = len(A )
UpperCAmelCase = len(matrix[0] )
UpperCAmelCase = [[-1 for _ in range(A )] for _ in range(A )]
for i in range(A ):
UpperCAmelCase = matrix[i][0]
for j in range(1 , A ):
for i in range(A ):
UpperCAmelCase = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , A ):
UpperCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
UpperCAmelCase = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F"""{solution() = }""")
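# The column-wise DP above follows Project Euler 82's "move right, up, or
# down" rule: each column is seeded from its left neighbour, then a top-down
# and a bottom-up sweep relax the vertical moves before the answer is read
# off the last column.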
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowercase : List[str] = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowercase : Optional[Any] = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowercase : Any = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__( datasets.Metric ):
def a__( self : Optional[int] )-> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , )
def a__( self : Optional[Any] , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , )-> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase )
}
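# A minimal pure-Python sketch (added for illustration; the metric above just
# delegates to nltk.translate.gleu_score) of the sentence-level computation the
# description spells out: GLEU = min(n-gram precision, n-gram recall), with n
# ranging over [min_len, max_len]. Single-reference only, unlike the corpus
# version used by the metric.
def _toy_gleu(hypothesis, reference, min_len=1, max_len=4):
    from collections import Counter

    def ngrams(tokens, n):
        return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))

    hyp_counts, ref_counts = Counter(), Counter()
    for n in range(min_len, max_len + 1):
        hyp_counts += ngrams(hypothesis, n)
        ref_counts += ngrams(reference, n)
    overlap = sum((hyp_counts & ref_counts).values())  # clipped matching n-grams
    precision = overlap / max(sum(hyp_counts.values()), 1)
    recall = overlap / max(sum(ref_counts.values()), 1)
    return min(precision, recall)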
| 712
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
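# Worked example with the defaults above (illustrative): hop_length =
# prod([8, 5, 4, 2]) = 320 samples, so frame_rate = ceil(24000 / 320) = 75
# frames per second, and for the largest default target bandwidth (24.0 kbps)
# the last property evaluates to int(1000 * 24.0 // (75 * 10)) = 32 quantizers.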
| 50
| 0
|
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_lowercase : Tuple = logging.get_logger(__name__)
class lowerCamelCase_( lowerCAmelCase ):
__magic_name__ : str = ["audio_values", "audio_mask"]
def __init__( self : Tuple , lowerCAmelCase : str=2048 , lowerCAmelCase : str=1 , lowerCAmelCase : Any=[16, 16] , lowerCAmelCase : List[str]=128 , lowerCAmelCase : Tuple=44100 , lowerCAmelCase : Dict=86 , lowerCAmelCase : List[str]=2048 , lowerCAmelCase : int=0.0 , **lowerCAmelCase : Optional[Any] , )-> Dict:
"""simple docstring"""
super().__init__(
feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase = spectrogram_length
UpperCAmelCase = num_channels
UpperCAmelCase = patch_size
UpperCAmelCase = feature_size // self.patch_size[1]
UpperCAmelCase = n_fft
UpperCAmelCase = sampling_rate // hop_length_to_sampling_rate
UpperCAmelCase = sampling_rate
UpperCAmelCase = padding_value
UpperCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowerCAmelCase , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=lowerCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , ).T
def a__( self : Optional[int] , lowerCAmelCase : np.array )-> np.ndarray:
"""simple docstring"""
UpperCAmelCase = spectrogram(
lowerCAmelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
UpperCAmelCase = log_spec[:, :-1]
UpperCAmelCase = log_spec - 20.0
UpperCAmelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
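    # Note: the clip(log_spec / 40, -2, 0) + 1 step above bounds the
    # normalized spectrogram to [-1, 1], saturating very quiet frames at -1.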
def __call__( self : Dict , lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[bool] = True , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , **lowerCAmelCase : Optional[int] , )-> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
UpperCAmelCase = isinstance(lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
UpperCAmelCase = is_batched_numpy or (
isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ):
UpperCAmelCase = np.asarray(lowerCAmelCase , dtype=np.floataa )
elif isinstance(lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
UpperCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowerCAmelCase ):
UpperCAmelCase = [np.asarray(lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
UpperCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
UpperCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
UpperCAmelCase = np.array(lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
UpperCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
UpperCAmelCase = np.ones([len(lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
UpperCAmelCase = padded_audio_features * self.padding_value
for i in range(len(lowerCAmelCase ) ):
UpperCAmelCase = audio_features[i]
UpperCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
UpperCAmelCase = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
UpperCAmelCase = {'''audio_values''': padded_audio_features}
UpperCAmelCase = BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
return encoded_inputs
| 713
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Any = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCamelCase__ ( A : Tuple ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = image.size
UpperCAmelCase , UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
UpperCAmelCase = np.array(A ).astype(np.floataa ) / 255.0
UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
UpperCAmelCase = torch.from_numpy(A )
return 2.0 * image - 1.0
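# e.g. (illustrative) a 520x388 input is resized down to 512x384 (the nearest
# multiples of 32), divided by 255, and mapped from [0, 1] into the [-1, 1]
# range the VQ-VAE expects, returned as a (1, 3, h, w) float tensor.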
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Tuple , lowerCAmelCase : VQModel , lowerCAmelCase : UNetaDModel , lowerCAmelCase : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , )-> Any:
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase )
@torch.no_grad()
def __call__( self : Tuple , lowerCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : Optional[int] = 100 , lowerCAmelCase : Optional[float] = 0.0 , lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , )-> Union[Tuple, ImagePipelineOutput]:
"""simple docstring"""
if isinstance(lowerCAmelCase , PIL.Image.Image ):
UpperCAmelCase = 1
elif isinstance(lowerCAmelCase , torch.Tensor ):
UpperCAmelCase = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCAmelCase )}""" )
if isinstance(lowerCAmelCase , PIL.Image.Image ):
UpperCAmelCase = preprocess(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCAmelCase = next(self.unet.parameters() ).dtype
UpperCAmelCase = randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase )
UpperCAmelCase = image.to(device=self.device , dtype=lowerCAmelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCAmelCase , device=self.device )
UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase = {}
if accepts_eta:
UpperCAmelCase = eta
for t in self.progress_bar(lowerCAmelCase ):
# concat latents and low resolution image in the channel dimension.
UpperCAmelCase = torch.cat([latents, image] , dim=1 )
UpperCAmelCase = self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
# predict the noise residual
UpperCAmelCase = self.unet(lowerCAmelCase , lowerCAmelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
# decode the image latents with the VQVAE
UpperCAmelCase = self.vqvae.decode(lowerCAmelCase ).sample
UpperCAmelCase = torch.clamp(lowerCAmelCase , -1.0 , 1.0 )
UpperCAmelCase = image / 2 + 0.5
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase )
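# Sketch of intended usage (checkpoint id assumed; upstream publishes this
# pipeline as LDMSuperResolutionPipeline):
#
#     pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     upscaled = pipe(image=low_res_img, num_inference_steps=100, eta=1.0).images[0]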
| 714
|
'''simple docstring'''
import heapq
def lowerCamelCase__ ( A : dict ):
'''simple docstring'''
UpperCAmelCase = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(A , [-1 * len(A ), (key, value)] )
# chosen_vertices = set of chosen vertices
UpperCAmelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
UpperCAmelCase = heapq.heappop(A )[1][0]
chosen_vertices.add(A )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if v has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
UpperCAmelCase = elem[1][1].index(A )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(A )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 50
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Dict )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = '''hf-internal-testing/tiny-random-t5'''
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCAmelCase )
UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase )
UpperCAmelCase = tokenizer('''This is me''' , return_tensors='''pt''' )
UpperCAmelCase = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCAmelCase = model.generate(**lowerCAmelCase )
UpperCAmelCase = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase )
UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCAmelCase = model_reloaded.generate(**lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Dict )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''hf-internal-testing/tiny-random-t5'''
UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase )
UpperCAmelCase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowerCAmelCase ):
model.save_pretrained(lowerCAmelCase )
UpperCAmelCase = model.reverse_bettertransformer()
model.save_pretrained(lowerCAmelCase )
| 715
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase : Optional[int] = """examples/"""
_lowercase : str = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase : List[Any] = """README.md"""
def lowerCamelCase__ ( A : int , A : str , A : Optional[Any] ):
'''simple docstring'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , A )
UpperCAmelCase = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(A , A ) , A , pattern='''examples''' )
def lowerCamelCase__ ( A : str , A : Dict=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A , A , A )
if not patch:
update_version_in_examples(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(A ).groups()[0]
return packaging.version.parse(A )
def lowerCamelCase__ ( A : Tuple=False ):
'''simple docstring'''
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(A ) == 0:
UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(A , patch=A )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
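# Example of the bumping rules above (illustrative): from "4.31.0.dev0" a
# standard release proposes "4.31.0"; from a released "4.31.0" a patch
# proposes "4.31.1" and a minor release "4.32.0"; post-release work then
# moves main to "4.32.0.dev0".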
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = get_version()
UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(A ) == 0:
UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(A )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 50
| 0
|
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowerCamelCase__ ( A : Dict , A : Dict , A : Optional[int]=[] ):
'''simple docstring'''
UpperCAmelCase = size[0] - overlap_pixels * 2
UpperCAmelCase = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
    UpperCAmelCase = np.ones((size_y, size_x) , dtype=np.uint8 ) * 2_55
UpperCAmelCase = np.pad(A , mode='''linear_ramp''' , pad_width=A , end_values=0 )
if "l" in remove_borders:
UpperCAmelCase = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
UpperCAmelCase = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
UpperCAmelCase = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
UpperCAmelCase = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowerCamelCase__ ( A : List[Any] , A : Union[str, Any] , A : Union[str, Any] ):
'''simple docstring'''
return max(A , min(A , A ) )
def lowerCamelCase__ ( A : [int] , A : [int] , A : [int] ):
'''simple docstring'''
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowerCamelCase__ ( A : [int] , A : int , A : [int] ):
'''simple docstring'''
UpperCAmelCase = list(A )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
UpperCAmelCase = clamp_rect(A , [0, 0] , [image_size[0], image_size[1]] )
return rect
def lowerCamelCase__ ( A : Tuple , A : List[str] , A : List[str] , A : Dict ):
'''simple docstring'''
UpperCAmelCase = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(A , (original_slice, 0) )
return result
def lowerCamelCase__ ( A : str , A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
UpperCAmelCase = tile.crop(A )
return tile
def lowerCamelCase__ ( A : Dict , A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = n % d
return n - divisor
class UpperCamelCase__( lowerCAmelCase ):
    def __init__( self : Dict , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNet2DConditionModel , lowerCAmelCase : DDPMScheduler , lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase : int = 350 , )-> List[Any]:
"""simple docstring"""
super().__init__(
vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , low_res_scheduler=lowerCAmelCase , scheduler=lowerCAmelCase , max_noise_level=lowerCAmelCase , )
def a__( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
UpperCAmelCase = add_overlap_rect(lowerCAmelCase , lowerCAmelCase , image.size )
UpperCAmelCase = image.crop(lowerCAmelCase )
UpperCAmelCase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
UpperCAmelCase = translated_slice_x - (original_image_slice / 2)
UpperCAmelCase = max(0 , lowerCAmelCase )
UpperCAmelCase = squeeze_tile(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = to_input.size
UpperCAmelCase = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
UpperCAmelCase = super(lowerCAmelCase , self ).__call__(image=lowerCAmelCase , **lowerCAmelCase ).images[0]
UpperCAmelCase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
UpperCAmelCase = unsqueeze_tile(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
UpperCAmelCase = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
UpperCAmelCase = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=lowerCAmelCase ) , mode='''L''' , )
final_image.paste(
lowerCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , lowerCAmelCase )
@torch.no_grad()
def __call__( self : str , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , lowerCAmelCase : int = 75 , lowerCAmelCase : float = 9.0 , lowerCAmelCase : int = 50 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 128 , lowerCAmelCase : int = 32 , lowerCAmelCase : int = 32 , )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
UpperCAmelCase = math.ceil(image.size[0] / tile_size )
UpperCAmelCase = math.ceil(image.size[1] / tile_size )
UpperCAmelCase = tcx * tcy
UpperCAmelCase = 0
for y in range(lowerCAmelCase ):
for x in range(lowerCAmelCase ):
self._process_tile(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , prompt=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , noise_level=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , )
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''stabilityai/stable-diffusion-x4-upscaler'''
    UpperCAmelCase = StableDiffusionTiledUpscalePipeline.from_pretrained(A , revision='''fp16''' , torch_dtype=torch.float16 )
UpperCAmelCase = pipe.to('''cuda''' )
UpperCAmelCase = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
def callback(A : Optional[Any] ):
print(f"""progress: {obj["progress"]:.4f}""" )
obj["image"].save('''diffusers_library_progress.jpg''' )
UpperCAmelCase = pipe(image=A , prompt='''Black font, white background, vector''' , noise_level=40 , callback=A )
final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
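# Illustrative sketch of the tiling arithmetic used by the pipeline above:
# expand a tile rectangle by `overlap` pixels on every side, then clamp it to
# the image bounds. All sizes below are hypothetical.
def _add_overlap_demo(rect, overlap, image_size):
    x0, y0, x1, y1 = rect
    w, h = image_size
    return (max(0, x0 - overlap), max(0, y0 - overlap),
            min(w, x1 + overlap), min(h, y1 + overlap))

assert _add_overlap_demo((0, 0, 128, 128), 32, (512, 512)) == (0, 0, 160, 160)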
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
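# Framework-free sketch of the semantics checked above: a stopping-criteria
# list fires as soon as any one criterion returns True for the current
# sequence length. The tiny stand-in class below is illustrative, not the
# real transformers API.
class _DemoMaxLen:
    def __init__(self, max_length):
        self.max_length = max_length

    def __call__(self, cur_len):
        return cur_len >= self.max_length

_criteria = [_DemoMaxLen(10)]
assert not any(c(9) for c in _criteria)   # keep generating
assert any(c(10) for c in _criteria)      # stop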
| 50
| 0
|
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_lowercase : Dict = get_logger(__name__)
def lowerCamelCase__ ( A : List[Any] , A : str , A : Dict , A : int , A : List[Any]=0 ):
'''simple docstring'''
os.makedirs(A , exist_ok=A )
with FSDP.state_dict_type(
A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
UpperCAmelCase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
UpperCAmelCase = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
UpperCAmelCase = os.path.join(A , A )
if accelerator.process_index == 0:
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(A , A )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
UpperCAmelCase = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Saving model to {output_model_file}""" )
torch.save(A , A )
logger.info(f"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
UpperCAmelCase = os.path.join(A , f"""{MODEL_NAME}_{model_index}""" )
os.makedirs(A , exist_ok=A )
logger.info(f"""Saving model to {ckpt_dir}""" )
UpperCAmelCase = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=A , storage_writer=dist_cp.FileSystemWriter(A ) , planner=DefaultSavePlanner() , )
logger.info(f"""Model saved to {ckpt_dir}""" )
def lowerCamelCase__ ( A : Optional[int] , A : List[str] , A : Union[str, Any] , A : Dict , A : List[Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(A ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
                    '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
                    '''initializing the FSDP object''' )
return
UpperCAmelCase = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Loading model from {input_model_file}""" )
UpperCAmelCase = torch.load(A )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
UpperCAmelCase = (
f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Loading model from {input_model_file}""" )
UpperCAmelCase = torch.load(A )
logger.info(f"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
UpperCAmelCase = (
os.path.join(A , f"""{MODEL_NAME}_{model_index}""" )
if f"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading model from {ckpt_dir}""" )
UpperCAmelCase = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=A , storage_reader=dist_cp.FileSystemReader(A ) , planner=DefaultLoadPlanner() , )
UpperCAmelCase = state_dict['''model''']
logger.info(f"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(A )
def lowerCamelCase__ ( A : str , A : Any , A : List[Any] , A : Dict , A : int , A : Optional[Any]=0 ):
'''simple docstring'''
os.makedirs(A , exist_ok=A )
with FSDP.state_dict_type(
A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
UpperCAmelCase = FSDP.optim_state_dict(A , A )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
UpperCAmelCase = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(A , A )
logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
else:
UpperCAmelCase = os.path.join(A , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(A , exist_ok=A )
logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(A ) , planner=DefaultSavePlanner() , )
logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def lowerCamelCase__ ( A : Union[str, Any] , A : Dict , A : Union[str, Any] , A : Any , A : Tuple , A : int=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
A , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
UpperCAmelCase = None
            # the check below should work, but currently it isn't (mostly a PyTorch issue);
            # in the meantime it is disabled at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
UpperCAmelCase = (
f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
UpperCAmelCase = os.path.join(A , A )
logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
UpperCAmelCase = torch.load(A )
logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
else:
UpperCAmelCase = (
os.path.join(A , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if f"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
UpperCAmelCase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(A ) , )
UpperCAmelCase = optim_state['''optimizer''']
logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
UpperCAmelCase = FSDP.optim_state_dict_to_load(A , A , A )
optimizer.load_state_dict(A )
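# Illustrative sketch of the checkpoint-naming scheme the helpers above share:
# the first model/optimizer gets a bare name, later ones get an index suffix,
# and LOCAL_STATE_DICT shards additionally get a per-rank suffix. The names
# below are placeholders.
def _ckpt_name_demo(stem, index, rank=None):
    base = stem if index == 0 else f"{stem}_{index}"
    return f"{base}.bin" if rank is None else f"{base}_rank{rank}.bin"

assert _ckpt_name_demo("pytorch_model", 0) == "pytorch_model.bin"
assert _ckpt_name_demo("pytorch_model", 1, rank=2) == "pytorch_model_1_rank2.bin"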
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowercase : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowercase : List[Any] = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
_lowercase : Any = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
_lowercase : Optional[int] = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[Any] = VOCAB_FILES_NAMES
__magic_name__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
__magic_name__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Any = ElectraTokenizer
def __init__( self : int , lowerCAmelCase : Dict=None , lowerCAmelCase : Any=None , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Union[str, Any]="[UNK]" , lowerCAmelCase : Any="[SEP]" , lowerCAmelCase : Dict="[PAD]" , lowerCAmelCase : Tuple="[CLS]" , lowerCAmelCase : str="[MASK]" , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Optional[int]=None , **lowerCAmelCase : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase = getattr(lowerCAmelCase , normalizer_state.pop('''type''' ) )
UpperCAmelCase = do_lower_case
UpperCAmelCase = strip_accents
UpperCAmelCase = tokenize_chinese_chars
UpperCAmelCase = normalizer_class(**lowerCAmelCase )
UpperCAmelCase = do_lower_case
def a__( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int=None )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a__( self : Union[str, Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
UpperCAmelCase = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
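# Framework-free sketch of the segment-id layout built by the token-type-id
# method above: zeros cover "[CLS] A [SEP]", ones cover "B [SEP]". The token
# ids below are arbitrary placeholders.
_cls, _sep = 101, 102
_a, _b = [7, 8], [9]
_input_ids = [_cls] + _a + [_sep] + _b + [_sep]
_type_ids = [0] * (len(_a) + 2) + [1] * (len(_b) + 1)
assert _type_ids == [0, 0, 0, 0, 1, 1]
assert len(_input_ids) == len(_type_ids)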
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( A : List[Any] , A : int , A : List[str] , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = BigBirdConfig.from_json_file(A )
print(f"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
UpperCAmelCase = BigBirdForQuestionAnswering(A )
else:
UpperCAmelCase = BigBirdForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(A , A , is_trivia_qa=A )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(A )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_lowercase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
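# Example invocation; the script filename and all paths are placeholders:
#   python convert_bigbird_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf_ckpt \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --is_trivia_qa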
'''simple docstring'''
import operator
def lowerCamelCase__ ( A : list , A : bool = False , A : list | None = None ):
UpperCAmelCase = operator.lt if reverse else operator.gt
UpperCAmelCase = solution or []
if not arr:
return solution
UpperCAmelCase = [arr.pop(0 )]
for i, item in enumerate(A ):
if _operator(A , sublist[-1] ):
sublist.append(A )
arr.pop(A )
# merging sublist into solution list
if not solution:
solution.extend(A )
else:
while sublist:
UpperCAmelCase = sublist.pop(0 )
for i, xx in enumerate(A ):
if not _operator(A , A ):
solution.insert(A , A )
break
else:
solution.append(A )
strand_sort(A , A , A )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
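# Illustrative sketch of the strand-extraction step above: pull the next
# increasing run ("strand") out of the input before merging it into the
# solution. Remove-by-value is a simplification that assumes distinct values.
_arr = [4, 3, 5, 1, 2]
_strand = [_arr.pop(0)]
for _item in list(_arr):
    if _item > _strand[-1]:
        _strand.append(_item)
        _arr.remove(_item)
assert _strand == [4, 5] and _arr == [3, 1, 2]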
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(lowerCAmelCase ) # fails here
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(3 )
UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
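# Framework-free sketch of the branching these tests pin down: after feeding a
# prefix, only the nested sequences that still match the prefix stay alive,
# and the constraint completes once one sequence is matched in full.
_nested = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_prefix = [1, 2, 5]
_alive = [s for s in _nested if s[: len(_prefix)] == _prefix]
assert _alive == [[1, 2, 5]]           # exactly one branch left
assert len(_alive[0]) == len(_prefix)  # fully matched -> completed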
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Optional[Any] = logging.get_logger(__name__)
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
UpperCAmelCase = 10_24
UpperCAmelCase = 40_96
UpperCAmelCase = 24
UpperCAmelCase = 16
UpperCAmelCase = [5, 11, 17, 23]
UpperCAmelCase = [2_56, 5_12, 10_24, 10_24]
UpperCAmelCase = (1, 3_84, 3_84)
if "nyu" or "midas" in checkpoint_url:
UpperCAmelCase = 7_68
UpperCAmelCase = [1, 1, 1, 0.5]
UpperCAmelCase = [2_56, 5_12, 7_68, 7_68]
UpperCAmelCase = 1_50
UpperCAmelCase = 16
UpperCAmelCase = (1, 3_84, 3_84)
UpperCAmelCase = False
UpperCAmelCase = '''project'''
if "ade" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = 7_68
UpperCAmelCase = [1, 1, 1, 0.5]
UpperCAmelCase = 1_50
UpperCAmelCase = 16
UpperCAmelCase = '''huggingface/label-files'''
UpperCAmelCase = '''ade20k-id2label.json'''
UpperCAmelCase = json.load(open(cached_download(hf_hub_url(A , A , repo_type='''dataset''' ) ) , '''r''' ) )
UpperCAmelCase = {int(A ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def lowerCamelCase__ ( A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(A , A )
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
UpperCAmelCase = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
UpperCAmelCase = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
UpperCAmelCase = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
UpperCAmelCase = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
UpperCAmelCase = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
UpperCAmelCase = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
UpperCAmelCase = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
UpperCAmelCase = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
UpperCAmelCase = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
UpperCAmelCase = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
UpperCAmelCase = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
UpperCAmelCase = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
UpperCAmelCase = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
UpperCAmelCase = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
UpperCAmelCase = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCAmelCase = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCAmelCase = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
UpperCAmelCase = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
UpperCAmelCase = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
UpperCAmelCase = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
UpperCAmelCase = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
UpperCAmelCase = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
UpperCAmelCase = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
UpperCAmelCase = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
UpperCAmelCase = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
UpperCAmelCase = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
UpperCAmelCase = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
UpperCAmelCase = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
UpperCAmelCase = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
UpperCAmelCase = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
UpperCAmelCase = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
UpperCAmelCase = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
UpperCAmelCase = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
UpperCAmelCase = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
UpperCAmelCase = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
def lowerCamelCase__ ( A : Any , A : Union[str, Any] ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
UpperCAmelCase = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase = Image.open(requests.get(A , stream=A ).raw )
return im
@torch.no_grad()
def lowerCamelCase__ ( A : Dict , A : List[str] , A : Any , A : Dict , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = get_dpt_config(A )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
UpperCAmelCase = torch.load(A , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(A )
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase = state_dict.pop(A )
UpperCAmelCase = val
# read in qkv matrices
read_in_q_k_v(A , A )
# load HuggingFace model
UpperCAmelCase = DPTForSemanticSegmentation(A ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(A )
model.load_state_dict(A )
model.eval()
# Check outputs on an image
UpperCAmelCase = 4_80 if '''ade''' in checkpoint_url else 3_84
UpperCAmelCase = DPTImageProcessor(size=A )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(A , return_tensors='''pt''' )
# forward pass
UpperCAmelCase = model(**A ).logits if '''ade''' in checkpoint_url else model(**A ).predicted_depth
if show_prediction:
UpperCAmelCase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=A , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
if pytorch_dump_folder_path is not None:
Path(A ).mkdir(exist_ok=A )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(A )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(A )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
_lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
_lowercase : str = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
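# Small self-check of the refinenet index remapping performed by the renaming
# logic above ("map 4 to 0, 3 to 1, 2 to 2 and 1 to 3"):
for _layer_idx, _expected in [(1, 3), (2, 2), (3, 1), (4, 0)]:
    assert abs(_layer_idx - 4) == _expected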
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCamelCase__( lowerCAmelCase ):
@staticmethod
def a__( lowerCAmelCase : ArgumentParser )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=lowerCAmelCase , default=lowerCAmelCase , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=lowerCAmelCase , help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : bool )-> Any:
"""simple docstring"""
UpperCAmelCase = model
UpperCAmelCase = cache
UpperCAmelCase = force
UpperCAmelCase = trust_remote_code
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
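# Example invocation; the entry-point name and model id are assumptions:
#   transformers-cli download --cache-dir ./models bert-base-uncased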
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase__ ( A : tuple[int, int] , A : int ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase = position
UpperCAmelCase = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
UpperCAmelCase = []
for position in positions:
UpperCAmelCase , UpperCAmelCase = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(A )
return permissible_positions
def lowerCamelCase__ ( A : list[list[int]] ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def lowerCamelCase__ ( A : list[list[int]] , A : tuple[int, int] , A : int ):
'''simple docstring'''
if is_complete(A ):
return True
for position in get_valid_pos(A , len(A ) ):
UpperCAmelCase , UpperCAmelCase = position
if board[y][x] == 0:
UpperCAmelCase = curr + 1
if open_knight_tour_helper(A , A , curr + 1 ):
return True
UpperCAmelCase = 0
return False
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = [[0 for i in range(A )] for j in range(A )]
for i in range(A ):
for j in range(A ):
UpperCAmelCase = 1
if open_knight_tour_helper(A , (i, j) , 1 ):
return board
UpperCAmelCase = 0
UpperCAmelCase = f"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
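# Self-contained check of the move generation above: a knight in the corner of
# a 5x5 board has exactly two legal moves. The offsets mirror the list used by
# the position helper above.
_moves = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
_n = 5
_legal = sorted((dy, dx) for dy, dx in _moves if 0 <= dy < _n and 0 <= dx < _n)
assert _legal == [(1, 2), (2, 1)]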
'''simple docstring'''
def lowerCamelCase__ ( A : str ):
'''simple docstring'''
assert column_title.isupper()
UpperCAmelCase = 0
UpperCAmelCase = len(A ) - 1
UpperCAmelCase = 0
while index >= 0:
UpperCAmelCase = (ord(column_title[index] ) - 64) * pow(26 , A )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
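# Worked example of the base-26 conversion above:
#   "AB" -> (ord("A") - 64) * 26**1 + (ord("B") - 64) * 26**0 = 26 + 2 = 28
assert (ord("A") - 64) * 26 + (ord("B") - 64) == 28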
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
_lowercase : int = logging.get_logger(__name__)
class UpperCamelCase__( lowerCAmelCase ):
def __init__( self : Any , lowerCAmelCase : str=None , **lowerCAmelCase : Tuple )-> Any:
"""simple docstring"""
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , lowerCAmelCase , )
super().__init__(args=lowerCAmelCase , **lowerCAmelCase )
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]="relu" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ : Optional[int] = False
__magic_name__ : List[str] = False
__magic_name__ : Dict = False
def a__( self : Union[str, Any] )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
pass
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Dict )-> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
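# Minimal sketch of the JIT-vs-eager equivalence pattern the model tests above
# use, with a trivial function standing in for the model.
if is_flax_available():

    def _f_demo(x):
        return x * 2 + 1

    _x_demo = jnp.ones((2, 3))
    assert jnp.allclose(jax.jit(_f_demo)(_x_demo), _f_demo(_x_demo))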
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCAmelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
def a__( self : Optional[int] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCAmelCase = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCAmelCase , variant=lowerCAmelCase ) )
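# The tests above pin down the contract: a repo is "safetensors compatible"
# when every ``.bin`` (or ``.{variant}.bin``) weight file has a safetensors
# counterpart, with torch text encoders stored as ``pytorch_model.bin`` mapping
# to ``model.safetensors``. The helper below is a minimal sketch of that rule
# for illustration only -- it is NOT the actual ``diffusers`` implementation,
# and the function name is hypothetical.
import os

def _sketch_is_safetensors_compatible(filenames, variant=None):
    suffix = f".{variant}.bin" if variant is not None else ".bin"
    for name in (f for f in filenames if f.endswith(suffix)):
        folder, base = os.path.split(name)
        # torch checkpoints named ``pytorch_model*`` pair with ``model*``
        if base.startswith("pytorch_model"):
            base = "model" + base[len("pytorch_model"):]
        expected = os.path.join(folder, base[: -len(".bin")] + ".safetensors")
        if expected not in filenames:
            return False
    return True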
| 701
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class MBart50Tokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
        return super().prepare_seq2seq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
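# Usage sketch for the tokenizer above (illustrative only; loading the
# checkpoint requires network access). Inputs are built as
# ``[src_lang_code] X [eos]`` and, after switching to the target-language
# special tokens, labels as ``[tgt_lang_code] Y [eos]``:
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     model_inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria")
#     # model_inputs["input_ids"] begins with the en_XX language-code id.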
| 50
| 0
|
def lowerCamelCase__ ( sentence : str , ngram_size : int ):
    '''simple docstring'''
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
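# Worked examples for the n-gram helper above (runnable):
#   lowerCamelCase__("abcde", 2) -> ["ab", "bc", "cd", "de"]
assert lowerCamelCase__('''abcde''' , 2 ) == ['''ab''', '''bc''', '''cd''', '''de''']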
if __name__ == "__main__":
from doctest import testmod
testmod()
| 702
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 50
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( args : List[Any] ):
    '''simple docstring'''
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class DownloadCommand(BaseTransformersCLICommand ):
@staticmethod
def a__( lowerCAmelCase : ArgumentParser )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=lowerCAmelCase , default=lowerCAmelCase , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=lowerCAmelCase , help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : bool )-> Any:
"""simple docstring"""
UpperCAmelCase = model
UpperCAmelCase = cache
UpperCAmelCase = force
UpperCAmelCase = trust_remote_code
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
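# Typical invocation of the command defined above, via the ``transformers-cli``
# entry point that ships with the library (the model name and cache path below
# are just examples):
#
#     transformers-cli download --cache-dir /tmp/models bert-base-uncased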
| 703
|
'''simple docstring'''
import functools
def lowerCamelCase__ ( days : list[int] , costs : list[int] ):
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('''The parameter days should be a list of integers''' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('''The parameter costs should be a list of three integers''' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('''All days elements should be greater than 0''' )
    if max(days ) >= 3_66:
        raise ValueError('''All days elements should be less than 366''' )
    days_set = set(days )
@functools.cache
def dynamic_programming(A : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
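# Worked example (runnable): with days=[1, 4, 6, 7, 8, 20] and costs=[2, 7, 25],
# the optimum is a 1-day pass on day 1, a 7-day pass covering days 4-8, and a
# 1-day pass on day 20: 2 + 7 + 2 = 11.
assert lowerCamelCase__([1, 4, 6, 7, 8, 20] , [2, 7, 25] ) == 11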
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
| 0
|
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def get_basic_setup( accelerator : Accelerator , num_samples : int=82 , batch_size : int=16 ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader( accelerator : Accelerator , use_longest : bool=False ):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
    dataset = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
    def tokenize_function(examples : Any ):
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples : Tuple ):
        if use_longest:
            return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup( dispatch_batches : bool , split_batches : bool ):
    '''simple docstring'''
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model : Any , dataloader : Any , accelerator : Accelerator ):
    '''simple docstring'''
    logits_and_targets = []
    for batch in dataloader:
        inputs , target = batch.values()
        with torch.no_grad():
            logit = model(inputs )
        logit , target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
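# Note on the helper above: ``accelerator.gather_for_metrics`` (a real
# accelerate API) gathers tensors from every process and drops the samples that
# were duplicated to pad the final batch, so the concatenated predictions line
# up 1:1 with the dataset. The usual evaluation pattern looks like this
# (names illustrative):
#
#     for batch in dataloader:
#         preds = model(**batch).logits.argmax(dim=-1)
#         preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
#         metric.add_batch(predictions=preds, references=refs)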
def test_torch_metrics( accelerator : Accelerator , num_samples : int=82 , dispatch_batches : bool=False , split_batches : bool=False , batch_size : int=16 ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), f"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}"""
def test_mrpc( dispatch_batches : bool = False , split_batches : bool = False ):
    '''simple docstring'''
    metric = evaluate.load('''glue''' , '''mrpc''' )
    setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model , dataloader , device = setup['''no''']
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch['''labels'''] )
    baseline = metric.compute()
    # Then do distributed
    model , dataloader , device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch['''labels''']
        preds , references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main():
    '''simple docstring'''
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches , split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
            test_torch_metrics(accelerator , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 5_12 )
accelerator.state._reset_state()
def lowerCamelCase__ ( A : Any ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_bigcode"""] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
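# The pattern above defers heavy imports until an attribute is first accessed.
# Below is a stripped-down sketch of the same idea; it is NOT the real
# ``transformers.utils._LazyModule`` (the class name is hypothetical), it only
# illustrates the core mechanism: resolve the submodule on attribute access.
import importlib

class _LazyModuleSketch:
    def __init__(self, package_name, import_structure):
        self._package_name = package_name
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            name: module for module, names in import_structure.items() for name in names
        }

    def __getattr__(self, name):
        # import the owning submodule lazily, then fetch the attribute from it
        module = importlib.import_module("." + self._name_to_module[name], self._package_name)
        return getattr(module, name)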
| 50
| 0
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    return_name = "generated"
def __init__( self : List[Any] , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Optional[int] )-> int:
"""simple docstring"""
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def a__( self : Tuple , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Any=None , lowerCAmelCase : Tuple=None , **lowerCAmelCase : int , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = {}
if truncation is not None:
UpperCAmelCase = truncation
UpperCAmelCase = generate_kwargs
UpperCAmelCase = {}
if return_tensors is not None and return_type is None:
UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase = self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
if len(lowerCAmelCase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def a__( self : Dict , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int )-> Dict:
"""simple docstring"""
return True
def a__( self : Any , *lowerCAmelCase : Dict , lowerCAmelCase : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , lowerCAmelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
UpperCAmelCase = ([prefix + arg for arg in args[0]],)
UpperCAmelCase = True
elif isinstance(args[0] , lowerCAmelCase ):
UpperCAmelCase = (prefix + args[0],)
UpperCAmelCase = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
UpperCAmelCase = self.tokenizer(*lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Optional[int] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = super().__call__(*lowerCAmelCase , **lowerCAmelCase )
if (
isinstance(args[0] , lowerCAmelCase )
and all(isinstance(lowerCAmelCase , lowerCAmelCase ) for el in args[0] )
and all(len(lowerCAmelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def a__( self : str , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any]=TruncationStrategy.DO_NOT_TRUNCATE , **lowerCAmelCase : List[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self._parse_and_tokenize(lowerCAmelCase , truncation=lowerCAmelCase , **lowerCAmelCase )
return inputs
def a__( self : str , lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple )-> Any:
"""simple docstring"""
if self.framework == "pt":
UpperCAmelCase , UpperCAmelCase = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
UpperCAmelCase , UpperCAmelCase = tf.shape(model_inputs['''input_ids'''] ).numpy()
UpperCAmelCase = generate_kwargs.get('''min_length''' , self.model.config.min_length )
UpperCAmelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(lowerCAmelCase , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
UpperCAmelCase = self.model.generate(**lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
UpperCAmelCase = output_ids.reshape(lowerCAmelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
UpperCAmelCase = tf.reshape(lowerCAmelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def a__( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=ReturnType.TEXT , lowerCAmelCase : List[str]=False )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCAmelCase = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
UpperCAmelCase = {
F"""{self.return_name}_text""": self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
}
records.append(lowerCAmelCase )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    return_name = "summary"
def __call__( self : Dict , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : str )-> Optional[Any]:
"""simple docstring"""
return super().__call__(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int )-> bool:
"""simple docstring"""
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    return_name = "translation"
def a__( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def a__( self : int , *lowerCAmelCase : Optional[Any] , lowerCAmelCase : str=TruncationStrategy.DO_NOT_TRUNCATE , lowerCAmelCase : Any=None , lowerCAmelCase : List[Any]=None )-> int:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , lowerCAmelCase ):
return self.tokenizer._build_translation_inputs(
*lowerCAmelCase , return_tensors=self.framework , truncation=lowerCAmelCase , src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase )
else:
return super()._parse_and_tokenize(*lowerCAmelCase , truncation=lowerCAmelCase )
def a__( self : Optional[Any] , lowerCAmelCase : Dict=None , lowerCAmelCase : Optional[Any]=None , **lowerCAmelCase : Dict )-> Dict:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = super()._sanitize_parameters(**lowerCAmelCase )
if src_lang is not None:
UpperCAmelCase = src_lang
if tgt_lang is not None:
UpperCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility; direct argument use is preferred.
UpperCAmelCase = kwargs.get('''task''' , self.task )
UpperCAmelCase = task.split('''_''' )
if task and len(lowerCAmelCase ) == 4:
# translation, XX, to YY
UpperCAmelCase = items[1]
UpperCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : str , *lowerCAmelCase : str , **lowerCAmelCase : int )-> Union[str, Any]:
"""simple docstring"""
return super().__call__(*lowerCAmelCase , **lowerCAmelCase )
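# Quick check of the legacy task-string convention handled by the translation
# pipeline above: a task such as "translation_en_to_ro" splits on "_" into four
# items, and items[1]/items[3] become src_lang/tgt_lang (runnable):
_task_items = '''translation_en_to_ro'''.split('''_''' )
assert len(_task_items ) == 4 and (_task_items[1], _task_items[3]) == ('''en''', '''ro''')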
| 705
|
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments( path : str , n_shave_prefix_segments : int=1 ):
    '''simple docstring'''
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
    else:
        return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def renew_resnet_paths( old_list , n_shave_prefix_segments : int=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('''in_layers.0''' , '''norm1''' )
        new_item = new_item.replace('''in_layers.2''' , '''conv1''' )
        new_item = new_item.replace('''out_layers.0''' , '''norm2''' )
        new_item = new_item.replace('''out_layers.3''' , '''conv2''' )
        new_item = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
        new_item = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def renew_attention_paths( old_list , n_shave_prefix_segments : int=0 ):
    '''simple docstring'''
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
        new_item = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
        new_item = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
        new_item = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
        new_item = shave_segments(new_item , n_shave_prefix_segments=n_shave_prefix_segments )
        mapping.append({'''old''': old_item, '''new''': new_item} )
    return mapping
def assign_to_checkpoint( paths , checkpoint , old_checkpoint , attention_paths_to_split=None , additional_replacements=None , config=None ):
    '''simple docstring'''
    assert isinstance(paths , list ), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['''num_head_channels'''] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
            query , key , value = old_tensor.split(channels // num_heads , dim=1 )
            checkpoint[path_map['''query''']] = query.reshape(target_shape )
            checkpoint[path_map['''key''']] = key.reshape(target_shape )
            checkpoint[path_map['''value''']] = value.reshape(target_shape )
    for path in paths:
        new_path = path['''new''']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
        new_path = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
        new_path = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['''old'''] , replacement['''new'''] )
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['''old''']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['''old''']]
def convert_ldm_checkpoint( checkpoint : dict , config : dict ):
    '''simple docstring'''
    new_checkpoint = {}
    new_checkpoint['''time_embedding.linear_1.weight'''] = checkpoint['''time_embed.0.weight''']
    new_checkpoint['''time_embedding.linear_1.bias'''] = checkpoint['''time_embed.0.bias''']
    new_checkpoint['''time_embedding.linear_2.weight'''] = checkpoint['''time_embed.2.weight''']
    new_checkpoint['''time_embedding.linear_2.bias'''] = checkpoint['''time_embed.2.bias''']
    new_checkpoint['''conv_in.weight'''] = checkpoint['''input_blocks.0.0.weight''']
    new_checkpoint['''conv_in.bias'''] = checkpoint['''input_blocks.0.0.bias''']
    new_checkpoint['''conv_norm_out.weight'''] = checkpoint['''out.0.weight''']
    new_checkpoint['''conv_norm_out.bias'''] = checkpoint['''out.0.bias''']
    new_checkpoint['''conv_out.weight'''] = checkpoint['''out.2.weight''']
    new_checkpoint['''conv_out.bias'''] = checkpoint['''out.2.bias''']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
    input_blocks = {
        layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
        for layer_id in range(num_input_blocks )
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
        for layer_id in range(num_middle_blocks )
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
    output_blocks = {
        layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
        for layer_id in range(num_output_blocks )
    }
    for i in range(1 , num_input_blocks ):
        block_id = (i - 1) // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = (i - 1) % (config['''num_res_blocks'''] + 1)
        resnets = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
        attentions = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
        if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.weight"""] = checkpoint[
                f"""input_blocks.{i}.0.op.weight"""
            ]
            new_checkpoint[f"""down_blocks.{block_id}.downsamplers.0.conv.bias"""] = checkpoint[
                f"""input_blocks.{i}.0.op.bias"""
            ]
            continue
        paths = renew_resnet_paths(resnets )
        meta_path = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
        resnet_op = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
        assign_to_checkpoint(
            paths , new_checkpoint , checkpoint , additional_replacements=[meta_path, resnet_op] , config=config )
        if len(attentions ):
            paths = renew_attention_paths(attentions )
            meta_path = {
                '''old''': f"""input_blocks.{i}.1""",
                '''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
            }
            to_split = {
                f"""input_blocks.{i}.1.qkv.bias""": {
                    '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                    '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                    '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                },
                f"""input_blocks.{i}.1.qkv.weight""": {
                    '''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                    '''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                    '''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                },
            }
            assign_to_checkpoint(
                paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split , config=config , )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0 )
    assign_to_checkpoint(resnet_0_paths , new_checkpoint , checkpoint , config=config )
    resnet_1_paths = renew_resnet_paths(resnet_1 )
    assign_to_checkpoint(resnet_1_paths , new_checkpoint , checkpoint , config=config )
    attentions_paths = renew_attention_paths(attentions )
    to_split = {
        '''middle_block.1.qkv.bias''': {
            '''key''': '''mid_block.attentions.0.key.bias''',
            '''query''': '''mid_block.attentions.0.query.bias''',
            '''value''': '''mid_block.attentions.0.value.bias''',
        },
        '''middle_block.1.qkv.weight''': {
            '''key''': '''mid_block.attentions.0.key.weight''',
            '''query''': '''mid_block.attentions.0.query.weight''',
            '''value''': '''mid_block.attentions.0.value.weight''',
        },
    }
    assign_to_checkpoint(
        attentions_paths , new_checkpoint , checkpoint , attention_paths_to_split=to_split , config=config )
    for i in range(num_output_blocks ):
        block_id = i // (config['''num_res_blocks'''] + 1)
        layer_in_block_id = i % (config['''num_res_blocks'''] + 1)
        output_block_layers = [shave_segments(name , 2 ) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id , layer_name = layer.split('''.''' )[0], shave_segments(layer , 1 )
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name )
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list ) > 1:
            resnets = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
            attentions = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
            resnet_0_paths = renew_resnet_paths(resnets )
            paths = renew_resnet_paths(resnets )
            meta_path = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
            assign_to_checkpoint(paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , config=config )
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.weight"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.weight"""
                ]
                new_checkpoint[f"""up_blocks.{block_id}.upsamplers.0.conv.bias"""] = checkpoint[
                    f"""output_blocks.{i}.{index}.conv.bias"""
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions ) == 2:
                    attentions = []
            if len(attentions ):
                paths = renew_attention_paths(attentions )
                meta_path = {
                    '''old''': f"""output_blocks.{i}.1""",
                    '''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
                }
                to_split = {
                    f"""output_blocks.{i}.1.qkv.bias""": {
                        '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
                        '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
                        '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
                    },
                    f"""output_blocks.{i}.1.qkv.weight""": {
                        '''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
                        '''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
                        '''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
                    },
                }
                assign_to_checkpoint(
                    paths , new_checkpoint , checkpoint , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=config , )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i] , n_shave_prefix_segments=1 )
            for path in resnet_0_paths:
                old_path = '''.'''.join(['''output_blocks''', str(i ), path['''old''']] )
                new_path = '''.'''.join(['''up_blocks''', str(block_id ), '''resnets''', str(layer_in_block_id ), path['''new''']] )
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
        vqvae = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 50
| 0
|
'''simple docstring'''
import math
def prime_sieve( n : int ):
    '''simple docstring'''
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
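# Sanity check for the sieve above (runnable):
assert prime_sieve(30 ) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]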
def solution( limit : int = 99_99_66_66_33_33 ):
    '''simple docstring'''
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 1_00
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 706
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig ):
    model_type = "dpr"
def __init__( self : Dict , lowerCAmelCase : Any=30522 , lowerCAmelCase : List[str]=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Tuple=12 , lowerCAmelCase : Optional[int]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=512 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=0.02 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : Tuple="absolute" , lowerCAmelCase : int = 0 , **lowerCAmelCase : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = projection_dim
UpperCAmelCase = position_embedding_type
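# Minimal usage sketch for the config above. ``DPRConfig`` is the upstream name
# of this class; with the defaults it mirrors the BERT-base geometry used by
# the published DPR checkpoints (illustrative; requires transformers):
#
#     from transformers import DPRConfig, DPRQuestionEncoder
#     config = DPRConfig(projection_dim=0)
#     model = DPRQuestionEncoder(config)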
| 50
| 0
|
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess( image , w , h ):
    '''simple docstring'''
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
def slerp( t , va , vb , DOT_THRESHOLD=0.9_995 ):
    '''simple docstring'''
    inputs_are_torch = False
    if not isinstance(va , np.ndarray ):
        inputs_are_torch = True
        input_device = va.device
        va = va.cpu().numpy()
        vb = vb.cpu().numpy()
    dot = np.sum(va * vb / (np.linalg.norm(va ) * np.linalg.norm(vb )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        result = (1 - t) * va + t * vb
    else:
        theta_a = np.arccos(dot )
        sin_theta_a = np.sin(theta_a )
        theta_t = theta_a * t
        sin_theta_t = np.sin(theta_t )
        sa = np.sin(theta_a - theta_t ) / sin_theta_a
        sb = sin_theta_t / sin_theta_a
        result = sa * va + sb * vb
    if inputs_are_torch:
        result = torch.from_numpy(result ).to(input_device )
    return result
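# Tiny numeric check of the spherical interpolation above (runnable): for
# orthogonal unit vectors the midpoint lies on the unit circle between them.
assert np.allclose(slerp(0.5 , np.array([1.0, 0.0] ) , np.array([0.0, 1.0] ) ) , [0.7071, 0.7071] , atol=1E-3 )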
def spherical_dist_loss( x , y ):
    '''simple docstring'''
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def set_requires_grad( model , value ):
    '''simple docstring'''
    for param in model.parameters():
        param.requires_grad = value
class UpperCamelCase__( DiffusionPipeline ):
def __init__( self : Union[str, Any] , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , lowerCAmelCase : CLIPFeatureExtractor , lowerCAmelCase : List[str]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Tuple=None , )-> Optional[Any]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=lowerCAmelCase , text_encoder=lowerCAmelCase , clip_model=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase , feature_extractor=lowerCAmelCase , coca_model=lowerCAmelCase , coca_tokenizer=lowerCAmelCase , coca_transform=lowerCAmelCase , )
UpperCAmelCase = (
feature_extractor.size
if isinstance(feature_extractor.size , lowerCAmelCase )
else feature_extractor.size['''shortest_edge''']
)
UpperCAmelCase = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowerCAmelCase )
set_requires_grad(self.clip_model , lowerCAmelCase )
def a__( self : Tuple , lowerCAmelCase : Optional[Union[str, int]] = "auto" )-> Any:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase )
def a__( self : Optional[int] )-> Dict:
"""simple docstring"""
self.enable_attention_slicing(lowerCAmelCase )
def a__( self : str )-> str:
"""simple docstring"""
set_requires_grad(self.vae , lowerCAmelCase )
def a__( self : List[Any] )-> int:
"""simple docstring"""
set_requires_grad(self.vae , lowerCAmelCase )
def a__( self : List[str] )-> Dict:
"""simple docstring"""
set_requires_grad(self.unet , lowerCAmelCase )
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
set_requires_grad(self.unet , lowerCAmelCase )
def a__( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = min(int(num_inference_steps * strength ) , lowerCAmelCase )
UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def a__( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]=None )-> List[str]:
"""simple docstring"""
if not isinstance(lowerCAmelCase , torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase )}""" )
UpperCAmelCase = image.to(device=lowerCAmelCase , dtype=lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase )
]
UpperCAmelCase = torch.cat(lowerCAmelCase , dim=0 )
else:
UpperCAmelCase = self.vae.encode(lowerCAmelCase ).latent_dist.sample(lowerCAmelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase = 0.18215 * init_latents
UpperCAmelCase = init_latents.repeat_interleave(lowerCAmelCase , dim=0 )
UpperCAmelCase = randn_tensor(init_latents.shape , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase )
# get latents
UpperCAmelCase = self.scheduler.add_noise(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = init_latents
return latents
def a__( self : List[str] , lowerCAmelCase : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.coca_transform(lowerCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
UpperCAmelCase = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
UpperCAmelCase = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def a__( self : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.feature_extractor.preprocess(lowerCAmelCase )
UpperCAmelCase = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
UpperCAmelCase = self.clip_model.get_image_features(lowerCAmelCase )
UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase )
UpperCAmelCase = image_embeddings_clip.repeat_interleave(lowerCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def a__( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , )-> Tuple:
"""simple docstring"""
UpperCAmelCase = latents.detach().requires_grad_()
UpperCAmelCase = self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
# predict the noise residual
UpperCAmelCase = self.unet(lowerCAmelCase , lowerCAmelCase , encoder_hidden_states=lowerCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
UpperCAmelCase = self.scheduler.alphas_cumprod[timestep]
UpperCAmelCase = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
UpperCAmelCase = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
UpperCAmelCase = torch.sqrt(lowerCAmelCase )
UpperCAmelCase = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowerCAmelCase ):
UpperCAmelCase = self.scheduler.sigmas[index]
UpperCAmelCase = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase = 1 / 0.18215 * sample
UpperCAmelCase = self.vae.decode(lowerCAmelCase ).sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = transforms.Resize(self.feature_extractor_size )(lowerCAmelCase )
UpperCAmelCase = self.normalize(lowerCAmelCase ).to(latents.dtype )
UpperCAmelCase = self.clip_model.get_image_features(lowerCAmelCase )
UpperCAmelCase = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase )
UpperCAmelCase = spherical_dist_loss(lowerCAmelCase , lowerCAmelCase ).mean() * clip_guidance_scale
UpperCAmelCase = -torch.autograd.grad(lowerCAmelCase , lowerCAmelCase )[0]
if isinstance(self.scheduler , lowerCAmelCase ):
UpperCAmelCase = latents.detach() + grads * (sigma**2)
UpperCAmelCase = noise_pred_original
else:
UpperCAmelCase = noise_pred_original - torch.sqrt(lowerCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Union[str, Any] , lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : Optional[int] = 512 , lowerCAmelCase : Optional[int] = 512 , lowerCAmelCase : float = 0.6 , lowerCAmelCase : Optional[int] = 50 , lowerCAmelCase : Optional[float] = 7.5 , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[float] = 100 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : float = 0.8 , lowerCAmelCase : float = 0.1 , lowerCAmelCase : float = 0.1 , )-> Optional[Any]:
"""simple docstring"""
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(lowerCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(lowerCAmelCase , torch.Generator ) and batch_size > 1:
UpperCAmelCase = [generator] + [None] * (batch_size - 1)
UpperCAmelCase = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
UpperCAmelCase = [x[0] for x in coca_is_none if x[1]]
UpperCAmelCase = ''', '''.join(lowerCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCAmelCase ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
UpperCAmelCase = self.get_image_description(lowerCAmelCase )
if style_prompt is None:
if len(lowerCAmelCase ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
UpperCAmelCase = self.get_image_description(lowerCAmelCase )
# get prompt text embeddings for content and style
UpperCAmelCase = self.tokenizer(
lowerCAmelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors='''pt''' , )
UpperCAmelCase = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase = self.tokenizer(
lowerCAmelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors='''pt''' , )
UpperCAmelCase = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
UpperCAmelCase = slerp(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
UpperCAmelCase = text_embeddings.repeat_interleave(lowerCAmelCase , dim=0 )
# set timesteps
UpperCAmelCase = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
UpperCAmelCase = {}
if accepts_offset:
UpperCAmelCase = 1
self.scheduler.set_timesteps(lowerCAmelCase , **lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowerCAmelCase , lowerCAmelCase , self.device )
UpperCAmelCase = timesteps[:1].repeat(lowerCAmelCase )
# Preprocess image
UpperCAmelCase = preprocess(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self.prepare_latents(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , text_embeddings.dtype , self.device , lowerCAmelCase )
UpperCAmelCase = preprocess(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self.prepare_latents(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , text_embeddings.dtype , self.device , lowerCAmelCase )
UpperCAmelCase = slerp(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if clip_guidance_scale > 0:
UpperCAmelCase = self.get_clip_image_embeddings(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self.get_clip_image_embeddings(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = slerp(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase = content_text_input.input_ids.shape[-1]
UpperCAmelCase = self.tokenizer([''''''] , padding='''max_length''' , max_length=lowerCAmelCase , return_tensors='''pt''' )
UpperCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
UpperCAmelCase = uncond_embeddings.repeat_interleave(lowerCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCAmelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCAmelCase = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
UpperCAmelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
UpperCAmelCase = torch.randn(lowerCAmelCase , generator=lowerCAmelCase , device='''cpu''' , dtype=lowerCAmelCase ).to(
self.device )
else:
UpperCAmelCase = torch.randn(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
UpperCAmelCase = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCAmelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCAmelCase = {}
if accepts_eta:
UpperCAmelCase = eta
# check if the scheduler accepts generator
UpperCAmelCase = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
UpperCAmelCase = generator
with self.progress_bar(total=lowerCAmelCase ):
for i, t in enumerate(lowerCAmelCase ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase = self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
# predict the noise residual
UpperCAmelCase = self.unet(lowerCAmelCase , lowerCAmelCase , encoder_hidden_states=lowerCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 )
UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
UpperCAmelCase = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
UpperCAmelCase , UpperCAmelCase = self.cond_fn(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase = self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
UpperCAmelCase = 1 / 0.18215 * latents
UpperCAmelCase = self.vae.decode(lowerCAmelCase ).sample
UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCAmelCase = self.numpy_to_pil(lowerCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCAmelCase , nsfw_content_detected=lowerCAmelCase )
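The pipeline above leans on a `slerp` helper (defined earlier in this file) to blend content and style text embeddings, image latents, and CLIP image embeddings. A minimal sketch of such a spherical interpolation, assuming the usual tensor signature; the exact helper in the source may differ:
import torch

def slerp_sketch(t: float, v0: torch.Tensor, v1: torch.Tensor, dot_threshold: float = 0.9995) -> torch.Tensor:
    # Cosine of the angle between the two embeddings
    dot = torch.sum(v0 * v1) / (v0.norm() * v1.norm())
    if torch.abs(dot) > dot_threshold:
        # Nearly colinear: plain linear interpolation is numerically safer
        return (1 - t) * v0 + t * v1
    theta = torch.acos(dot)  # angle between v0 and v1
    return (torch.sin((1 - t) * theta) * v0 + torch.sin(t * theta) * v1) / torch.sin(theta)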
| 707
|
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
_lowercase : Tuple = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def lowerCamelCase__ ( A : Optional[Any] ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def lowerCamelCase__ ( A : Any , A : str ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
elif args.student_type == "gpt2":
UpperCAmelCase = False
def lowerCamelCase__ ( A : List[Any] , A : List[str] ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCAmelCase = False
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=A , required=A , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=A , required=A , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=A , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=A , help='''The student type (DistilBERT, RoBERTa, GPT2).''' , )
parser.add_argument('''--student_config''' , type=A , required=A , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=A , type=A , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=A , help='''Teacher type (BERT, RoBERTa, GPT2).''' )
parser.add_argument('''--teacher_name''' , type=A , required=A , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=A , help='''Temperature for the distillation softmax.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=A , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=A , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=A , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=A , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=A , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=A , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=A , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=A , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=A , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=A , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=A , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only on the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=A , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=A , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A , default=50 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=A , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=A , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=A , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=A , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=A , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=A , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=A , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=A , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=A , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=A , default=56 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=A , default=5_00 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=A , default=40_00 , help='''Checkpoint interval.''' )
UpperCAmelCase = parser.parse_args()
sanity_checks(A )
# ARGS #
init_gpu_params(A )
set_seed(A )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(A ) , A , indent=4 )
git_log(args.dump_path )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.student_type]
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCAmelCase = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCAmelCase = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCAmelCase = tokenizer.all_special_tokens.index(A )
UpperCAmelCase = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
UpperCAmelCase = special_tok_ids
UpperCAmelCase = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , '''rb''' ) as fp:
UpperCAmelCase = pickle.load(A )
UpperCAmelCase = np.maximum(A , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCAmelCase = 0.0 # do not predict special tokens
UpperCAmelCase = torch.from_numpy(A )
else:
UpperCAmelCase = None
UpperCAmelCase = LmSeqsDataset(params=A , data=A )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
UpperCAmelCase = student_config_class.from_pretrained(args.student_config )
UpperCAmelCase = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
UpperCAmelCase = student_model_class.from_pretrained(args.student_pretrained_weights , config=A )
else:
UpperCAmelCase = student_model_class(A )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('''Student loaded.''' )
# TEACHER #
UpperCAmelCase = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=A )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(A , A )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(A , A )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCAmelCase = Distiller(
params=A , dataset=A , token_probs=A , student=A , teacher=A )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
if __name__ == "__main__":
main()
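The `--mlm_smoothing` exponent above turns raw token counts into masking probabilities that over-sample rare tokens (a word2vec-style trick). A toy illustration of that step (the counts are made up):
import numpy as np

counts = np.array([1_000_000, 10_000, 100, 1])  # toy token counts
probs = np.maximum(counts, 1) ** -0.7           # the default --mlm_smoothing of 0.7
probs /= probs.sum()                            # normalize into a sampling distribution
print(probs)  # rare tokens receive a far larger share than raw frequency would give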
| 50
| 0
|
'''simple docstring'''
import math
def lowerCamelCase__ ( A : int ):
'''simple docstring'''
UpperCAmelCase = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(A )
def lowerCamelCase__ ( A : float = 1 / 1_23_45 ):
'''simple docstring'''
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 3
while True:
UpperCAmelCase = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(A ):
UpperCAmelCase = int(A )
total_partitions += 1
if check_partition_perfect(A ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(A )
integer += 1
if __name__ == "__main__":
print(F"""{solution() = }""")
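As a sanity check on the integrality test above: inputs of the form 2**k * (2**k - 1) make sqrt(4n + 1) equal to 2**(k + 1) - 1, so the logarithm comes out exactly k. A quick verification sketch:
import math

for k in range(1, 6):
    n = 2**k * (2**k - 1)                  # e.g. k = 2 gives n = 12
    exponent = math.log2(math.sqrt(4 * n + 1) / 2 + 1 / 2)
    assert exponent == k                   # sqrt(4*12 + 1) = 7 -> log2(4) = 2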
| 708
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Tuple = ["image_processor", "tokenizer"]
__magic_name__ : Any = "ViTImageProcessor"
__magic_name__ : str = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : List[str] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowerCAmelCase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase , lowerCAmelCase )
def __call__( self : str , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[str] )-> List[Any]:
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError('''You have to specify either text, visual prompt or images.''' )
if text is not None and visual_prompt is not None:
raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if images is not None:
UpperCAmelCase = self.image_processor(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
if visual_prompt is not None and images is not None:
UpperCAmelCase = {
'''pixel_values''': image_features.pixel_values,
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
UpperCAmelCase = {
'''conditional_pixel_values''': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase ) , tensor_type=lowerCAmelCase )
def a__( self : List[str] , *lowerCAmelCase : str , **lowerCAmelCase : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase , **lowerCAmelCase )
def a__( self : Dict , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[str] )-> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase , **lowerCAmelCase )
@property
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase , )
return self.image_processor_class
@property
def a__( self : Any )-> List[Any]:
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase , )
return self.image_processor
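This obfuscated processor mirrors transformers' CLIPSegProcessor (ViT image processor plus CLIP tokenizer); assuming that correspondence holds, a typical call looks like the hedged sketch below, with the checkpoint id taken from the public CLIPSeg release:
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))  # stand-in image
inputs = processor(text=["a cat", "a remote"], images=[image, image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']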
| 50
| 0
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
_lowercase : Optional[int] = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
_lowercase : Union[str, Any] = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = CamembertTokenizer
__magic_name__ : Dict = CamembertTokenizerFast
__magic_name__ : int = True
__magic_name__ : Optional[int] = True
def a__( self : Optional[Any] )-> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = CamembertTokenizer(lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def a__( self : Dict )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCAmelCase ) , 1004 )
def a__( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = CamembertTokenizer(lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
UpperCAmelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCAmelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@slow
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# camembert is a french model. So we also use french texts.
UpperCAmelCase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=lowerCAmelCase , )
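The slow/fast parity pattern exercised above is expected to hold for the released checkpoint as well; a self-contained version of the check (network access assumed):
from transformers import CamembertTokenizer, CamembertTokenizerFast

slow = CamembertTokenizer.from_pretrained("camembert-base")
fast = CamembertTokenizerFast.from_pretrained("camembert-base")
text = "I was born in 92000, and this is falsé."
assert slow.encode(text) == fast.encode(text)  # identical ids, special tokens included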
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Any = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = ["""MaskFormerFeatureExtractor"""]
_lowercase : Dict = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
_lowercase : List[Any] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
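`_LazyModule` above defers the heavy torch/vision imports until a symbol is first accessed. A stdlib-only sketch of the same idea via PEP 562 module `__getattr__` (illustrative only; `_LazyModule` itself is private transformers machinery, and the package layout below is hypothetical):
# Contents of a hypothetical mypkg/__init__.py
import importlib

_LAZY = {"MaskFormerModel": ".modeling_maskformer"}  # symbol -> submodule

def __getattr__(name):
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)  # resolved only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")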
| 50
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase__:
def __init__( self : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : str=3 , lowerCAmelCase : List[str]=32 , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Union[str, Any]=10 , lowerCAmelCase : Optional[int]=[8, 16, 32, 64] , lowerCAmelCase : Tuple=[1, 1, 2, 1] , lowerCAmelCase : str=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : Optional[int]="relu" , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=["stage2", "stage3", "stage4"] , lowerCAmelCase : List[Any]=[2, 3, 4] , lowerCAmelCase : int=1 , )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
UpperCAmelCase = out_features
UpperCAmelCase = out_indices
UpperCAmelCase = num_groups
def a__( self : str )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def a__( self : List[Any] )-> int:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def a__( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = BitModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = BitForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : int , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = BitBackbone(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase = None
UpperCAmelCase = BitBackbone(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__( self : Any )-> str:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Any = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__magic_name__ : Tuple = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
__magic_name__ : Union[str, Any] = False
__magic_name__ : Any = False
__magic_name__ : Any = False
__magic_name__ : int = False
__magic_name__ : List[str] = False
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase = BitModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : Tuple )-> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def a__( self : int )-> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def a__( self : List[str] )-> Dict:
"""simple docstring"""
pass
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=lowerCAmelCase )
for name, module in model.named_modules():
if isinstance(lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def a__( self : List[str] )-> str:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : str ):
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase = layer_type
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
pass
def a__( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = BitModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def a__( self : Optional[Any] )-> str:
"""simple docstring"""
UpperCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
@require_torch
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[Any] = (BitBackbone,) if is_torch_available() else ()
__magic_name__ : int = BitConfig
__magic_name__ : Union[str, Any] = False
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = BitModelTester(self )
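The integration test above, rewritten as a standalone inference snippet; the checkpoint id "google/bit-50" is an assumption about the first entry of the pretrained archive list:
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

processor = BitImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])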
| 710
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[Any] = 16
_lowercase : Dict = 32
def lowerCamelCase__ ( A : Accelerator , A : int = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' )
UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A : int ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
A , padding='''longest''' , max_length=A , pad_to_multiple_of=A , return_tensors='''pt''' , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A )
UpperCAmelCase = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowercase : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowerCamelCase__ ( A : Optional[Any] , A : Tuple ):
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A ) == "1":
UpperCAmelCase = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config['''lr''']
UpperCAmelCase = int(config['''num_epochs'''] )
UpperCAmelCase = int(config['''seed'''] )
UpperCAmelCase = int(config['''batch_size'''] )
set_seed(A )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(A , A )
UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=A )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=A , num_warmup_steps=1_00 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
A , A , A , A , A )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
UpperCAmelCase = os.path.split(A )[-1].split('''.''' )[0]
accelerator.init_trackers(A , A )
# Now we train the model
for epoch in range(A ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
UpperCAmelCase = 0
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(A ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**A )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A , references=A , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'''accuracy''': eval_metric['''accuracy'''],
'''f1''': eval_metric['''f1'''],
'''train_loss''': total_loss.item() / len(A ),
'''epoch''': epoch,
} , step=A , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=A , default=A , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
''' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
''' and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
'''--project_dir''' , type=A , default='''logs''' , help='''Location where to store experiment tracking logs and relevant project information''' , )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(A , A )
if __name__ == "__main__":
main()
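The gradient-accumulation fallback above keeps the effective batch size fixed while capping what each device sees per step; a quick arithmetic check of that logic:
requested_batch, per_device_cap = 64, 16  # cap mirrors MAX_GPU_BATCH_SIZE
if requested_batch > per_device_cap:
    accumulation_steps = requested_batch // per_device_cap
    per_device_batch = per_device_cap
else:
    accumulation_steps, per_device_batch = 1, requested_batch
assert accumulation_steps * per_device_batch == 64  # effective batch preserved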
| 50
| 0
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase : Optional[int] = """examples/"""
_lowercase : str = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase : List[Any] = """README.md"""
def lowerCamelCase__ ( A : int , A : str , A : Optional[Any] ):
'''simple docstring'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , A )
UpperCAmelCase = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(A , A ) , A , pattern='''examples''' )
def lowerCamelCase__ ( A : str , A : Dict=False ):
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A , A , A )
if not patch:
update_version_in_examples(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.readlines()
# Find the start of the list.
UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A )
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(A ).groups()[0]
return packaging.version.parse(A )
def lowerCamelCase__ ( A : Tuple=False ):
'''simple docstring'''
UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, check out a released version!''' )
if default_version.is_devrelease:
UpperCAmelCase = default_version.base_version
elif patch:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(A ) == 0:
UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(A , patch=A )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = get_version()
UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(A ) == 0:
UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(A )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
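Each REPLACE_PATTERNS entry above is a (regex, template) pair applied file by file; a minimal demonstration of the `init` rule on a toy source string:
import re

pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
code = '__version__ = "4.28.0.dev0"\n'
print(pattern.sub('__version__ = "4.28.0"\n', code), end="")
# -> __version__ = "4.28.0"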
| 711
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : int = {
"""configuration_xmod""": [
"""XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XmodConfig""",
"""XmodOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = [
"""XMOD_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XmodForCausalLM""",
"""XmodForMaskedLM""",
"""XmodForMultipleChoice""",
"""XmodForQuestionAnswering""",
"""XmodForSequenceClassification""",
"""XmodForTokenClassification""",
"""XmodModel""",
"""XmodPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
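# Editorial note: the _LazyModule indirection above defers the heavy torch
# imports until a symbol is actually accessed at runtime; the TYPE_CHECKING
# branch exists only so that static type checkers still see the real classes.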
| 50
| 0
|
'''simple docstring'''
from math import sqrt
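# Editorial sketch of the counting idea (assuming the usual Project Euler 86
# reading): for an a x b x c cuboid with c the longest side, the shortest
# path over the surface has length sqrt((a + b)**2 + c**2), so each value of
# sum_shortest_sides = a + b only needs to be tested once and the valid
# (a, b) splits counted in bulk.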
def solution( limit : int = 1_00_00_00 ):
    '''simple docstring'''
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 712
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[str] = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 12.0, 24.0] , lowerCAmelCase : Tuple=24000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , )-> List[str]:
"""simple docstring"""
UpperCAmelCase = target_bandwidths
UpperCAmelCase = sampling_rate
UpperCAmelCase = audio_channels
UpperCAmelCase = normalize
UpperCAmelCase = chunk_length_s
UpperCAmelCase = overlap
UpperCAmelCase = hidden_size
UpperCAmelCase = num_filters
UpperCAmelCase = num_residual_layers
UpperCAmelCase = upsampling_ratios
UpperCAmelCase = norm_type
UpperCAmelCase = kernel_size
UpperCAmelCase = last_kernel_size
UpperCAmelCase = residual_kernel_size
UpperCAmelCase = dilation_growth_rate
UpperCAmelCase = use_causal_conv
UpperCAmelCase = pad_mode
UpperCAmelCase = compress
UpperCAmelCase = num_lstm_layers
UpperCAmelCase = trim_right_ratio
UpperCAmelCase = codebook_size
UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**lowerCAmelCase )
@property
def a__( self : str )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def a__( self : List[Any] )-> int:
"""simple docstring"""
        hop_length = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def a__( self : int )-> int:
"""simple docstring"""
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
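# Editorial sanity check of the derived properties above (my annotation,
# using only the defaults shown in __init__): hop_length = 8 * 5 * 4 * 2 = 320,
# frame_rate = ceil(24000 / 320) = 75, and
# num_quantizers = int(1000 * 24.0 // (75 * 10)) = 32.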
| 50
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
"""tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_canine'''] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 713
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_cpmant'''] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
_lowercase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 50
| 0
|
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
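# Worked example of the index arithmetic below (editorial annotation): for
# i=0, j=1 the resnet pair relates HF "down_blocks.0.resnets.1." to SD
# "input_blocks.2.0.", i.e. HF down block (i, j) corresponds to SD input
# block 3*i + j + 1.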
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
_lowercase : List[Any] = F"""down_blocks.{i}.resnets.{j}."""
_lowercase : Union[str, Any] = F"""input_blocks.{3*i + j + 1}.0."""
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
_lowercase : int = F"""down_blocks.{i}.attentions.{j}."""
_lowercase : Tuple = F"""input_blocks.{3*i + j + 1}.1."""
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
_lowercase : List[str] = F"""up_blocks.{i}.resnets.{j}."""
_lowercase : Any = F"""output_blocks.{3*i + j}.0."""
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
_lowercase : Optional[Any] = F"""up_blocks.{i}.attentions.{j}."""
_lowercase : Optional[int] = F"""output_blocks.{3*i + j}.1."""
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
_lowercase : Dict = F"""down_blocks.{i}.downsamplers.0.conv."""
_lowercase : Tuple = F"""input_blocks.{3*(i+1)}.0.op."""
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
_lowercase : Union[str, Any] = F"""up_blocks.{i}.upsamplers.0."""
_lowercase : Tuple = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
_lowercase : Tuple = """mid_block.attentions.0."""
_lowercase : Optional[Any] = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
_lowercase : int = F"""mid_block.resnets.{j}."""
_lowercase : Any = F"""middle_block.{2*j}."""
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict( unet_state_dict : List[Any] ):
    '''simple docstring'''
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
_lowercase : List[str] = F"""encoder.down_blocks.{i}.resnets.{j}."""
_lowercase : Optional[int] = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
_lowercase : Union[str, Any] = F"""down_blocks.{i}.downsamplers.0."""
_lowercase : str = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
_lowercase : Optional[int] = F"""up_blocks.{i}.upsamplers.0."""
_lowercase : Union[str, Any] = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
_lowercase : Tuple = F"""decoder.up_blocks.{i}.resnets.{j}."""
_lowercase : int = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
_lowercase : Any = F"""mid_block.resnets.{i}."""
_lowercase : List[Any] = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
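# Editorial note (assuming the usual SD checkpoint layout): the VAE stores
# its mid-block attention projections as 1x1 convolutions, so the
# Linear-style [out, in] weights reshaped below gain two trailing singleton
# dimensions and become [out, in, 1, 1].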
def reshape_weight_for_sd( w : Dict ):
    '''simple docstring'''
    return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict( vae_state_dict : str ):
    '''simple docstring'''
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ['''q''', '''k''', '''v''', '''proj_out''']
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""" )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
_lowercase : Dict = {"""q""": 0, """k""": 1, """v""": 2}
def convert_text_enc_state_dict_vaa( text_enc_dict : Optional[int] ):
    '''simple docstring'''
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith('''.self_attn.q_proj.weight''' )
            or k.endswith('''.self_attn.k_proj.weight''' )
            or k.endswith('''.self_attn.v_proj.weight''' )
        ):
            k_pre = k[: -len('''.q_proj.weight''' )]
            k_code = k[-len('''q_proj.weight''' )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith('''.self_attn.q_proj.bias''' )
            or k.endswith('''.self_attn.k_proj.bias''' )
            or k.endswith('''.self_attn.v_proj.bias''' )
        ):
            k_pre = k[: -len('''.q_proj.bias''' )]
            k_code = k[-len('''q_proj.bias''' )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '''.in_proj_weight'''] = torch.cat(tensors )
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + '''.in_proj_bias'''] = torch.cat(tensors )
    return new_state_dict
def convert_text_enc_state_dict( text_enc_dict : Tuple ):
'''simple docstring'''
return text_enc_dict
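# Editorial note (assuming the standard SD v2 text encoder layout): OpenCLIP
# keeps a single fused in_proj tensor per attention layer, which is why the
# separate q/k/v tensors captured above are concatenated with torch.cat
# before their keys are renamed.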
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
_lowercase : Any = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
    unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
    vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
    text_enc_path = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
    # Load models from safetensors if they exist, otherwise fall back to the PyTorch .bin files
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="""cpu""")
    else:
        unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
        unet_state_dict = torch.load(unet_path, map_location="""cpu""")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="""cpu""")
    else:
        vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
        vae_state_dict = torch.load(vae_path, map_location="""cpu""")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="""cpu""")
    else:
        text_enc_path = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
        text_enc_dict = torch.load(text_enc_path, map_location="""cpu""")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify a v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
    # Put together the new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"""state_dict""": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 714
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover( graph : dict ):
    '''simple docstring'''
    queue = []
    # for each node and its adjacency list, add them and the node's rank to the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq implements a min-heap, so -1 * len(value) is used to get max-heap behaviour
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while the queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract the vertex with max rank from the queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 50
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int=7 , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Optional[int]=18 , lowerCAmelCase : Union[str, Any]=30 , lowerCAmelCase : List[Any]=400 , lowerCAmelCase : Any=True , lowerCAmelCase : int=None , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Dict=None , lowerCAmelCase : Any=True , lowerCAmelCase : Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase : int=[0.5, 0.5, 0.5] , lowerCAmelCase : Optional[Any]=False , )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = num_channels
UpperCAmelCase = image_size
UpperCAmelCase = min_resolution
UpperCAmelCase = max_resolution
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean
UpperCAmelCase = image_std
UpperCAmelCase = do_reduce_labels
def a__( self : Any )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase = Image.open(dataset[0]['''file'''] )
UpperCAmelCase = Image.open(dataset[1]['''file'''] )
return image, map
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
UpperCAmelCase = Image.open(ds[0]['''file'''] )
UpperCAmelCase = Image.open(ds[1]['''file'''] )
UpperCAmelCase = Image.open(ds[2]['''file'''] )
UpperCAmelCase = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = BeitImageProcessingTester(self )
@property
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__( self : Dict )-> int:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )
def a__( self : str )-> int:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase )
UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowerCAmelCase )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , lowerCAmelCase )
def a__( self : Tuple )-> str:
"""simple docstring"""
pass
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a__( self : Optional[int] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a__( self : Any )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
UpperCAmelCase = []
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCAmelCase = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
UpperCAmelCase , UpperCAmelCase = prepare_semantic_single_inputs()
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
UpperCAmelCase , UpperCAmelCase = prepare_semantic_batch_inputs()
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def a__( self : Dict )-> int:
"""simple docstring"""
UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCAmelCase , UpperCAmelCase = prepare_semantic_single_inputs()
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
UpperCAmelCase = True
UpperCAmelCase = image_processing(lowerCAmelCase , lowerCAmelCase , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
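    # Editorial note: with do_reduce_labels enabled the background class 0 is
    # remapped to the ignore index 255 and every other label shifts down by
    # one, which is why the assertions above allow label values up to 255.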
| 715
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_lowercase : Optional[int] = """examples/"""
_lowercase : str = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowercase : Dict = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowercase : List[Any] = """README.md"""
def lowerCamelCase__ ( A : int , A : str , A : Optional[Any] ):
'''simple docstring'''
with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
UpperCAmelCase = f.read()
UpperCAmelCase , UpperCAmelCase = REPLACE_PATTERNS[pattern]
UpperCAmelCase = replace.replace('''VERSION''' , A )
UpperCAmelCase = re_pattern.sub(A , A )
with open(A , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(A )
def lowerCamelCase__ ( A : Optional[int] ):
'''simple docstring'''
for folder, directories, fnames in os.walk(A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
            update_version_in_file(os.path.join(folder , fname ) , A , pattern='''examples''' )
def lowerCamelCase__ ( version : str , patch : Dict=False ):
    '''simple docstring'''
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def lowerCamelCase__ ( ):
'''simple docstring'''
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('''1.''' ):
            lines[index] = lines[index].replace(
                '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
        index += 1
    with open(README_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
UpperCAmelCase = f.read()
    UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCAmelCase ).groups()[0]
    return packaging.version.parse(UpperCAmelCase )
def lowerCamelCase__ ( patch : Tuple=False ):
'''simple docstring'''
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = f"""{default_version.major}.{default_version.minor + 1}.0"""
    # Now let's ask nicely if that's the right one.
    version = input(f"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
        version = default_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
'''simple docstring'''
    current_version = get_version()
    dev_version = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
        version = dev_version
    print(f"""Updating version to {version}.""" )
    global_version_update(version )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowercase : Union[str, Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
| 50
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowercase : int = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : List[str] = "focalnet"
def __init__( self : List[Any] , lowerCAmelCase : Any=224 , lowerCAmelCase : Any=4 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : str=96 , lowerCAmelCase : List[str]=False , lowerCAmelCase : int=[192, 384, 768, 768] , lowerCAmelCase : List[Any]=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : Dict="gelu" , lowerCAmelCase : str=4.0 , lowerCAmelCase : int=0.0 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Optional[int]=1E-4 , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Any=False , lowerCAmelCase : Tuple=False , lowerCAmelCase : Any=0.02 , lowerCAmelCase : Tuple=1E-5 , lowerCAmelCase : Dict=32 , lowerCAmelCase : Dict=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Optional[Any] , )-> Dict:
"""simple docstring"""
super().__init__(**lowerCAmelCase )
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = use_conv_embed
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = focal_levels
UpperCAmelCase = focal_windows
UpperCAmelCase = hidden_act
UpperCAmelCase = mlp_ratio
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = drop_path_rate
UpperCAmelCase = use_layerscale
UpperCAmelCase = layerscale_value
UpperCAmelCase = use_post_layernorm
UpperCAmelCase = use_post_layernorm_in_modulation
UpperCAmelCase = normalize_modulator
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = encoder_stride
UpperCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
UpperCAmelCase , UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names )
| 716
|
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] , lowerCAmelCase : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = 3
UpperCAmelCase = 250
UpperCAmelCase = ids_tensor((batch_size, length) , lowerCAmelCase )
UpperCAmelCase = torch.ones((batch_size, length) , device=lowerCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaxLengthCriteria(max_length=10 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase , UpperCAmelCase = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def a__( self : Tuple )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self._get_tensors(5 )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase , lowerCAmelCase ) )
def a__( self : int )-> Any:
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
UpperCAmelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase ) , 1 )
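    # Editorial note (as I read the helper): validate_stopping_criteria
    # inserts a MaxLengthCriteria for the requested max length into an
    # otherwise empty list, hence the expected length of 1 in the final check.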
| 50
| 0
|
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadVaProcessor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 717
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase__( metaclass=lowerCAmelCase ):
__magic_name__ : List[str] = ["note_seq"]
def __init__( self : Any , *lowerCAmelCase : List[str] , **lowerCAmelCase : int )-> Optional[int]:
"""simple docstring"""
requires_backends(self , ['''note_seq'''] )
@classmethod
def a__( cls : Dict , *lowerCAmelCase : int , **lowerCAmelCase : Optional[int] )-> Dict:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
@classmethod
def a__( cls : int , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''note_seq'''] )
| 50
| 0
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = 1
UpperCAmelCase = 3
UpperCAmelCase = (32, 32)
UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase )
return image
@property
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def a__( self : Any )-> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def a__( self : Dict )-> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(lowerCAmelCase )
@property
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
def extract(*lowerCAmelCase : str , **lowerCAmelCase : Tuple ):
class UpperCamelCase__:
def __init__( self : Optional[int] )-> Any:
"""simple docstring"""
UpperCAmelCase = torch.ones([0] )
def a__( self : Dict , lowerCAmelCase : str )-> str:
"""simple docstring"""
self.pixel_values.to(lowerCAmelCase )
return self
return Out()
return extract
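    # Editorial note: the stub above stands in for a real CLIP feature
    # extractor; it returns an Out() whose pixel_values is an empty tensor,
    # so pipeline code that calls the feature extractor gets a harmless
    # placeholder during these tests.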
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.dummy_cond_unet
UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # make sure that the PNDM scheduler skips the PRK steps here
UpperCAmelCase = StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''A painting of a squirrel eating a burger'''
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = sd_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCAmelCase , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.dummy_cond_unet
UpperCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # make sure that the PNDM scheduler skips the PRK steps here
UpperCAmelCase = StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''A painting of a squirrel eating a burger'''
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = sd_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=lowerCAmelCase , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Any )-> Any:
"""simple docstring"""
UpperCAmelCase = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=lowerCAmelCase )
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert isinstance(pipe.scheduler , lowerCAmelCase )
assert pipe.safety_checker is None
UpperCAmelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
UpperCAmelCase = StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCAmelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.dummy_cond_unet
UpperCAmelCase = PNDMScheduler(skip_prk_steps=lowerCAmelCase )
UpperCAmelCase = self.dummy_vae
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
UpperCAmelCase = unet.half()
UpperCAmelCase = vae.half()
UpperCAmelCase = bert.half()
        # make sure that the PNDM scheduler skips the PRK steps here
UpperCAmelCase = StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''A painting of a squirrel eating a burger'''
UpperCAmelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase )
UpperCAmelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
UpperCAmelCase = 4003660346
UpperCAmelCase = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with strong safety guidance (sld_guidance_scale = 2000)
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Optional[int] )-> Any:
"""simple docstring"""
UpperCAmelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=lowerCAmelCase )
UpperCAmelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''padme amidala taking a bath artwork, safe for work, no nudity'''
UpperCAmelCase = 2734971755
UpperCAmelCase = 7
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
UpperCAmelCase = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
UpperCAmelCase = 1044355234
UpperCAmelCase = 12
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
UpperCAmelCase = sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( A : List[Any] , A : int , A : List[str] , A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = BigBirdConfig.from_json_file(A )
print(f"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
UpperCAmelCase = BigBirdForQuestionAnswering(A )
else:
UpperCAmelCase = BigBirdForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(A , A , is_trivia_qa=A )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(A )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
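    # Example invocation (hypothetical paths):
    #   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
    #     --tf_checkpoint_path bigbird/model.ckpt \
    #     --big_bird_config_file bigbird/config.json \
    #     --pytorch_dump_path ./bigbird_pytorch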
_lowercase : Any = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def lowerCamelCase__ ( A : Union[str, Any]=32 , A : Dict=10 , A : int=1_00 , A : Optional[int]=10_26 , A : Dict=True , A : Dict="data/tokenized_stories_train_wikitext103.jbl" , A : Optional[int]="igf_context_pairs.jbl" , ):
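    # Collect (context, information gain) pairs for the secondary learner: seeds are
    # fixed so the objective set is identical across runs, and the pairs are written
    # to `igf_data_file`.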
set_seed(3 )
# generate train_data and objective_set
UpperCAmelCase , UpperCAmelCase = generate_datasets(
A , A , number=A , min_len=10_26 , trim=A )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCAmelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
UpperCAmelCase = load_gpta('''gpt2''' ).to(A )
print('''computing perplexity on objective set''' )
UpperCAmelCase = compute_perplexity(A , A , A ).item()
print('''perplexity on objective set:''' , A )
# collect igf pairs and save to file demo.jbl
collect_objective_set(A , A , A , A , A , A , A , A )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def lowerCamelCase__ ( A : Optional[Any] , A : List[Any]=15 , A : Any=1_28 , A : Dict=1_00 , A : Any="igf_model.pt" , ):
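    # Train the secondary learner on the collected (context, IG) pairs, then free
    # the base model to reclaim GPU memory.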
set_seed(42 )
# Load pre-trained model
UpperCAmelCase = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
UpperCAmelCase = SecondaryLearner(A )
# Train secondary learner
UpperCAmelCase = train_secondary_learner(
A , A , max_epochs=A , batch_size=A , eval_freq=1_00 , igf_model_path=A , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def lowerCamelCase__ ( A : int , A : List[Any] , A : Any , A : Optional[Any]=32 , A : Union[str, Any]=10_00 , A : List[Any]=16 , A : Any=1.0 , A : List[str]=recopy_gpta , A : Any=None , A : Union[str, Any]=10 , A : Any="gpt2_finetuned.pt" , ):
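    # Fine-tune GPT-2 with Information Gain Filtration: the secondary learner scores
    # each candidate context, and only contexts above the (decaying) threshold are
    # backpropagated.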
UpperCAmelCase = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
UpperCAmelCase = RandomSampler(A )
UpperCAmelCase = DataLoader(A , sampler=A )
UpperCAmelCase = max_steps // (len(A )) + 1
UpperCAmelCase = 0
UpperCAmelCase = torch.zeros((1, context_len) , dtype=torch.long , device=A )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = recopy_model(A , A , A )
model.train()
if secondary_learner is not None:
secondary_learner.to(A )
secondary_learner.eval()
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = []
UpperCAmelCase = []
# Compute the performance of the transformer model at the beginning
UpperCAmelCase = compute_perplexity(A , A , A )
test_perps.append(A )
print('''Test perplexity, step''' , A , ''':''' , A )
for epoch in range(int(A ) ):
for step, example in enumerate(A ):
torch.cuda.empty_cache()
UpperCAmelCase = random.randint(0 , example.size(2 ) - context_len - 1 )
UpperCAmelCase = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCAmelCase = model(A , labels=A )
UpperCAmelCase = True
if secondary_learner is not None:
UpperCAmelCase = secondary_learner.forward(
torch.tensor(A , dtype=torch.long , device=A ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(A ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCAmelCase = -1
if predicted_q < threshold:
UpperCAmelCase = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
UpperCAmelCase = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCAmelCase = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCAmelCase = compute_perplexity(A , A , A )
test_perps.append(A )
print('''Test perplexity, step''' , A , ''':''' , A )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , A )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def lowerCamelCase__ ( ):
UpperCAmelCase = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=A , type=A , required=A , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=A , type=A , required=A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=A , default=A , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=A , default=A , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=A , type=A , required=A , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=A , type=A , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=A , default=A , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=1_00 , type=A , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=1_00 , type=A , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=10_00 , type=A , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=1_28 , type=A , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
        '''--batch_size''' , default=16 , type=A , help='''batch size of the training data for the language model (gpt2)''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=A , help=(
            '''interval (in global steps) at which the fine-tuned model\'s test'''
            ''' perplexity is computed'''
) , )
parser.add_argument(
'''--number''' , default=1_00 , type=A , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=10_26 , type=A , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=A , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=A , type=A , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=A , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=A , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=A , type=A , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=A , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
UpperCAmelCase = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
UpperCAmelCase = training_secondary_learner(
A , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
UpperCAmelCase = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
UpperCAmelCase , UpperCAmelCase = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_00 , min_len=10_26 , trim=A )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
A , A , A , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=A , secondary_learner=A , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
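    # A DisjunctiveConstraint is fulfilled once generation has produced any one of
    # the candidate token sequences it was initialized with.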
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , lowerCAmelCase ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__( self : Union[str, Any] )-> str:
"""simple docstring"""
UpperCAmelCase = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(lowerCAmelCase ):
DisjunctiveConstraint(lowerCAmelCase ) # fails here
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
UpperCAmelCase = stepped is True and completed is False and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(3 )
UpperCAmelCase = stepped is True and completed is True and reset is False
self.assertTrue(lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCAmelCase = DisjunctiveConstraint(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
'''simple docstring'''
def count_divisors( n : int ):
    '''Count the divisors of n via its prime factorization: d(n) = prod(e_i + 1).'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # whatever remains after trial division is a prime factor with exponent 1
        n_divisors *= 2
    return n_divisors
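# Example: 28 = 2^2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6
# (the divisors are 1, 2, 4, 7, 14 and 28).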
def solution():
    '''Return the first triangular number with more than five hundred divisors.'''
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 5_00:
            break
    return t_num
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( A : List[Any] ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCamelCase__( lowerCAmelCase ):
@staticmethod
def a__( lowerCAmelCase : ArgumentParser )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parser.add_parser('''download''' )
download_parser.add_argument(
'''--cache-dir''' , type=lowerCAmelCase , default=lowerCAmelCase , help='''Path to location to store the models''' )
download_parser.add_argument(
'''--force''' , action='''store_true''' , help='''Force the model to be download even if already in cache-dir''' )
download_parser.add_argument(
'''--trust-remote-code''' , action='''store_true''' , help='''Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine''' , )
download_parser.add_argument('''model''' , type=lowerCAmelCase , help='''Name of the model to download''' )
download_parser.set_defaults(func=lowerCAmelCase )
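        # e.g. `transformers-cli download bert-base-uncased --cache-dir /tmp/models`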
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : bool )-> Any:
"""simple docstring"""
UpperCAmelCase = model
UpperCAmelCase = cache
UpperCAmelCase = force
UpperCAmelCase = trust_remote_code
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCamelCase__:
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[Any]=13 , lowerCAmelCase : Optional[int]=32 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : List[str]=16 , lowerCAmelCase : Union[str, Any]=[1, 2, 1] , lowerCAmelCase : List[str]=[2, 2, 4] , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : str=2.0 , lowerCAmelCase : Dict=True , lowerCAmelCase : Tuple=0.0 , lowerCAmelCase : Any=0.0 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0.02 , lowerCAmelCase : Union[str, Any]=1E-5 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : int=None , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : str=["stage1", "stage2", "stage3"] , lowerCAmelCase : str=[1, 2, 3] , )-> int:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = depths
UpperCAmelCase = num_heads
UpperCAmelCase = window_size
UpperCAmelCase = mlp_ratio
UpperCAmelCase = qkv_bias
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = drop_path_rate
UpperCAmelCase = hidden_act
UpperCAmelCase = use_absolute_embeddings
UpperCAmelCase = patch_norm
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = is_training
UpperCAmelCase = scope
UpperCAmelCase = use_labels
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = encoder_stride
UpperCAmelCase = out_features
UpperCAmelCase = out_indices
def a__( self : List[str] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def a__( self : str )-> str:
"""simple docstring"""
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a__( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = MaskFormerSwinModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase )
UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
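        # Each patch-merging stage halves height and width, so the token count shrinks
        # by 4x per stage while the hidden size doubles.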
UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a__( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : int )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = MaskFormerSwinBackbone(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = model(lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowerCAmelCase ):
UpperCAmelCase = ['''stem''']
UpperCAmelCase = MaskFormerSwinBackbone(config=lowerCAmelCase )
def a__( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : str = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__magic_name__ : Optional[int] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__magic_name__ : Any = False
__magic_name__ : Optional[int] = False
__magic_name__ : Dict = False
__magic_name__ : str = False
__magic_name__ : Tuple = False
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase = MaskFormerSwinModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def a__( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
pass
def a__( self : Dict )-> List[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Dict )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCAmelCase )
@unittest.skip('''Swin does not use inputs_embeds''' )
def a__( self : Dict )-> List[Any]:
"""simple docstring"""
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
pass
def a__( self : str )-> Any:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def a__( self : Optional[int] )-> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def a__( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
pass
def a__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
# Swin has a different seq_length
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a__( self : Dict )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
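        # Pad height and width so the image divides evenly into whole patches.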
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def a__( self : List[str] )-> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
pass
def a__( self : int )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t : Tuple ):
            # zero out NaNs so the element-wise allclose comparison below is meaningful
            t[t != t] = 0
            return t
def check_equivalence(lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str]={} ):
with torch.no_grad():
UpperCAmelCase = model(**lowerCAmelCase , return_dict=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = model(**lowerCAmelCase , return_dict=lowerCAmelCase , **lowerCAmelCase ).to_tuple()
def recursive_check(lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] ):
if isinstance(lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase , lowerCAmelCase ):
recursive_check(lowerCAmelCase , lowerCAmelCase )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(lowerCAmelCase , lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowerCAmelCase ) , set_nan_tensor_to_zero(lowerCAmelCase ) , atol=1E-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(lowerCAmelCase ).any()} and `inf`: {torch.isinf(lowerCAmelCase )}. Dict has"""
F""" `nan`: {torch.isnan(lowerCAmelCase ).any()} and `inf`: {torch.isinf(lowerCAmelCase )}."""
) , )
recursive_check(lowerCAmelCase , lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , {'''output_hidden_states''': True} )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
check_equivalence(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , {'''output_hidden_states''': True} )
@require_torch
class UpperCamelCase__( unittest.TestCase , lowerCAmelCase ):
__magic_name__ : List[str] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__magic_name__ : str = MaskFormerSwinConfig
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = MaskFormerSwinModelTester(self )
def a__( self : Dict )-> Dict:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
UpperCAmelCase = backbone_class(lowerCAmelCase )
backbone.to(lowerCAmelCase )
backbone.eval()
UpperCAmelCase = backbone(**lowerCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , lowerCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCAmelCase = backbone(**lowerCAmelCase , output_hidden_states=lowerCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCAmelCase = backbone(**lowerCAmelCase , output_attentions=lowerCAmelCase )
self.assertIsNotNone(outputs.attentions )
'''simple docstring'''
def excel_title_to_column( column_title : str ):
    '''Convert an Excel-style column title (e.g. "AB") to its column number.'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
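# Example: "AB" -> (2 * 26**0) + (1 * 26**1) = 28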
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
def knapsack( values : list , weights : list , number_of_items : int , max_weight : int , index : int ):
    '''0/1 knapsack: best value achievable from items[index:] within max_weight.'''
    if index == number_of_items:
        return 0
    ans_without_item = knapsack(values , weights , number_of_items , max_weight , index + 1 )
    ans_with_item = 0
    if weights[index] <= max_weight:
        ans_with_item = values[index] + knapsack(
            values , weights , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans_without_item , ans_with_item )
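# Plain recursion explores both the "skip" and "take" branch for every item, so it
# runs in O(2**n); memoizing on (index, max_weight) would bring this down to
# O(n * max_weight).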
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__( unittest.TestCase ):
def __init__( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Tuple=32 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Optional[Any]=10 , lowerCAmelCase : Optional[Any]=[10, 20, 30, 40] , lowerCAmelCase : Optional[int]=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]="relu" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : Union[str, Any]=None , )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowerCAmelCase )
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModel(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowerCAmelCase )
UpperCAmelCase = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__( self : List[str] )-> Any:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Optional[int] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ : Optional[int] = False
__magic_name__ : List[str] = False
__magic_name__ : Dict = False
def a__( self : Union[str, Any] )-> None:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def a__( self : List[str] )-> List[str]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
return
def a__( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def a__( self : Any )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def a__( self : str )-> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def a__( self : Any )-> List[str]:
"""simple docstring"""
pass
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def a__( self : Tuple )-> int:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
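            # one hidden state per stage, plus the initial embedding output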
self.assertEqual(len(lowerCAmelCase ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def a__( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = model_class(lowerCAmelCase )
@jax.jit
def model_jitted(lowerCAmelCase : Tuple , **lowerCAmelCase : Tuple ):
return model(pixel_values=lowerCAmelCase , **lowerCAmelCase )
with self.subTest('''JIT Enabled''' ):
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class UpperCamelCase__( unittest.TestCase ):
@cached_property
def a__( self : Dict )-> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowerCAmelCase , return_tensors='''np''' )
UpperCAmelCase = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase = (1, 1000)
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
_lowercase : Dict = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_lowercase : Tuple = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation( cells : list[list[int]] ):
    '''Compute the next Game of Life generation for a grid of 0/1 cells.'''
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
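# Example: the blinker oscillates with period 2.
# [[0, 1, 0],        [[0, 0, 0],
#  [0, 1, 0],   ->    [1, 1, 1],
#  [0, 1, 0]]         [0, 0, 0]]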
def generate_images( cells : list[list[int]] , frames : int ):
    '''Run the simulation for `frames` steps, rendering each generation to an image.'''
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('''RGB''' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 2_55 - cells[y][x] * 2_55
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
_lowercase : int = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:])
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[Any] = """▁"""
_lowercase : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""}
_lowercase : Any = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
_lowercase : int = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1024,
}
# fmt: off
_lowercase : int = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCamelCase__( lowerCAmelCase ):
__magic_name__ : Optional[int] = VOCAB_FILES_NAMES
__magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__ : Union[str, Any] = ["input_ids", "attention_mask"]
__magic_name__ : List[int] = []
__magic_name__ : List[int] = []
def __init__( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : str=None , lowerCAmelCase : List[Any]="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : int="<unk>" , lowerCAmelCase : str="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[Any] , )-> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCAmelCase , tgt_lang=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
UpperCAmelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
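        # i.e. fairseq id = spm id + 1 for every regular token.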
UpperCAmelCase = len(self.sp_model )
UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase )
}
UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
UpperCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
UpperCAmelCase = src_lang if src_lang is not None else '''en_XX'''
UpperCAmelCase = self.lang_code_to_id[self._src_lang]
UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def a__( self : Union[str, Any] )-> int:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def a__( self : str )-> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__( self : Any , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Tuple )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self : Dict , lowerCAmelCase : Dict )-> None:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def a__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__( self : str , lowerCAmelCase : str )-> List[str]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def a__( self : Optional[int] , lowerCAmelCase : str )-> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowerCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def a__( self : List[Any] , lowerCAmelCase : int )-> str:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def a__( self : int , lowerCAmelCase : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''''''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(lowerCAmelCase )
UpperCAmelCase = False
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , '''wb''' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
def a__( self : List[str] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False )-> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
UpperCAmelCase = [1] * len(self.prefix_tokens )
UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
def a__( self : Tuple , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def a__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Optional[int] )-> Optional[Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase = src_lang
UpperCAmelCase = self(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase = self.convert_tokens_to_ids(lowerCAmelCase )
UpperCAmelCase = tgt_lang_id
return inputs
def a__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str = "en_XX" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "ro_RO" , **lowerCAmelCase : List[str] , )-> BatchEncoding:
"""simple docstring"""
UpperCAmelCase = src_lang
UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase )
def a__( self : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def a__( self : List[Any] )-> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__( self : List[Any] , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[src_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
def a__( self : int , lowerCAmelCase : str )-> None:
"""simple docstring"""
UpperCAmelCase = self.lang_code_to_id[tgt_lang]
UpperCAmelCase = [self.cur_lang_code_id]
UpperCAmelCase = [self.eos_token_id]
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowercase : Optional[Any] = logging.get_logger("""transformers.models.speecht5""")
def lowerCamelCase__ ( A : List[Any] , A : int , A : Union[str, Any] ):
'''simple docstring'''
hf_model.apply_weight_norm()
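    # The original checkpoint stores weight-norm parametrized tensors (weight_g /
    # weight_v), so weight norm is applied before copying and removed afterwards.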
UpperCAmelCase = checkpoint['''input_conv.weight_g''']
UpperCAmelCase = checkpoint['''input_conv.weight_v''']
UpperCAmelCase = checkpoint['''input_conv.bias''']
for i in range(len(config.upsample_rates ) ):
UpperCAmelCase = checkpoint[f"""upsamples.{i}.1.weight_g"""]
UpperCAmelCase = checkpoint[f"""upsamples.{i}.1.weight_v"""]
UpperCAmelCase = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
UpperCAmelCase = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
UpperCAmelCase = checkpoint['''output_conv.1.weight_g''']
UpperCAmelCase = checkpoint['''output_conv.1.weight_v''']
UpperCAmelCase = checkpoint['''output_conv.1.bias''']
hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['''model''']['''generator'''] , model , config )
    # stats.npy stores the spectrogram normalisation statistics: row 0 is the mean, row 1 the scale
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
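# Example invocation (file names are illustrative placeholders, not files
# shipped with this script):
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path hifigan_generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan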
| 702
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowercase : Union[str, Any] = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 50
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blenderbot"""] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blenderbot"""] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_blenderbot"""] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
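# The `_import_structure` / `_LazyModule` pattern above defers the heavy framework
# imports until an attribute is first touched. A minimal sketch of the same idea,
# as a hypothetical standalone module (names are illustrative):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, name):
#           module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#           value = getattr(module, name)
#           setattr(self, name, value)  # cache so later lookups bypass __getattr__
#           return value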
| 703
|
'''simple docstring'''
import functools
def lowerCamelCase__ ( days : list[int] , costs : list[int] ):
    '''simple docstring'''
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('''The parameter days should be a list of integers''' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('''The parameter costs should be a list of three integers''' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('''All days elements should be greater than 0''' )
    if max(days ) >= 3_66:
        raise ValueError('''All days elements should be less than 366''' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index : int ) -> int:
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
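# Worked example: travelling on days [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15]
# gives a minimum spend of 11 (a 1-day pass on day 1, a 7-day pass covering
# days 4-8, and a 1-day pass on day 20): 2 + 7 + 2 = 11.
#   assert lowerCamelCase__([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11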
if __name__ == "__main__":
import doctest
doctest.testmod()
| 50
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowercase : List[str] = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 704
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_gpt_bigcode"""] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
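# Usage sketch once the lazy init above is importable (the checkpoint id is
# illustrative; any GPTBigCode checkpoint would work, and torch must be installed):
#   from transformers import GPTBigCodeForCausalLM
#   model = GPTBigCodeForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder")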
| 50
| 0
|