"""Convert Hubert checkpoint."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq parameter names to their Hugging Face counterparts.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` to the attribute of `hf_pointer` addressed by the dotted `key`."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every weight of the fairseq model into the matching Hugging Face module."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor weight into the Hugging Face model."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
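# Usage sketch (hypothetical paths; assumes `fairseq` and `transformers` are both
# installed and that the checkpoint file exists -- adjust names to your setup):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-converted \
#       --not_finetuned
#
# For a CTC fine-tuned checkpoint, drop --not_finetuned and pass --dict_path so the
# tokenizer vocabulary can be rebuilt from the fairseq Dictionary.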
"""
Prim's Algorithm.

Determines the minimum spanning tree (MST) of a graph using the Prim's Algorithm.
"""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        """
        Arguments:
            id_ - an id to identify the vertex

        Attributes:
            neighbors - a list of the vertices it is linked to
            edges     - a dict to store the edges' weights
        """
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Add a pointer to a vertex at the neighbors' list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Store the weight of the edge to a destination vertex."""
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    """Connect vertices a and b (1-indexed) with an edge of the given weight."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    """Prim's Algorithm: runs in O(mn) with `m` edges and `n` vertices and
    returns the MST as a list of (vertex, parent) tuples."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's Algorithm with a min heap: runs in O((m + n) log n) and yields
    the MST edges as (vertex, parent) tuples."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    """Placeholder for doctest examples exercising `prim` and `prim_heap`;
    `doctest.testmod()` below picks up any examples added here."""
if __name__ == "__main__":
import doctest
doctest.testmod()
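# Minimal usage sketch (assumes only the Vertex/connect/prim definitions above;
# the printed parent pairs depend on tie-breaking, so treat the output as
# illustrative rather than exact):
#
#   x = 5  # number of vertices
#   g = [Vertex(n) for n in range(x)]
#   connect(g, 1, 2, 15)
#   connect(g, 1, 3, 12)
#   connect(g, 2, 4, 13)
#   connect(g, 3, 4, 12)
#   connect(g, 1, 5, 20)
#   print(prim(g, g[0]))
#   print(list(prim_heap(g, g[0])))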
"""JSON and JSON-Lines loader for the `datasets` library."""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
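# Usage sketch: this builder is what backs `load_dataset("json", ...)`. A minimal,
# hypothetical invocation (file names are placeholders):
#
#   from datasets import load_dataset
#
#   # one JSON object per line (JSON-Lines):
#   ds = load_dataset("json", data_files="train.jsonl")
#
#   # a single JSON document whose records live under one key:
#   ds = load_dataset("json", data_files="dump.json", field="data")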
"""Odd-even transposition sort: a parallel-friendly variant of bubble sort."""


def odd_even_sort(input_list: list) -> list:
    """Sort `input_list` in place and return it."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
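# Quick non-interactive example of the expected behaviour:
#
#   >>> odd_even_sort([5, 3, 1, 4, 2])
#   [1, 2, 3, 4, 5]
#
# Each pass alternates compare-swaps on even and odd index pairs, which is why the
# algorithm parallelizes well: all swaps within one pass are independent.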
"""Tests for the graph-to-ONNX conversion utilities in transformers."""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
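# Usage sketch for the conversion helper exercised by these tests (a hedged example;
# the output path and model name are placeholders):
#
#   from pathlib import Path
#   from transformers.convert_graph_to_onnx import convert
#
#   convert(
#       framework="pt",                 # "pt" or "tf"
#       model="bert-base-cased",        # model name or local path
#       output=Path("onnx/bert.onnx"),  # must point into a fresh directory
#       opset=12,
#   )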
"""
An edge is a bridge if, after removing it, the graph is divided into two components.
"""


def get_demo_graph(index: int) -> dict:
    """Return one of four demo adjacency lists, selected by `index`."""
    return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict) -> list:
    """Return the list of undirected graph bridges as (a, b) pairs with a < b."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
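# Example run against the demo graphs above (output order follows the DFS from
# vertex 0; verified by hand for index 0):
#
#   >>> compute_bridges(get_demo_graph(0))
#   [(3, 4), (2, 3), (2, 5)]
#
#   >>> compute_bridges({0: [1], 1: [0]})
#   [(0, 1)]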
"""Generate the first n terms of the harmonic series: 1 + 1/2 + 1/3 + ... + 1/n."""


def harmonic_series(n_term: str) -> list:
    """Return the series terms as strings; the input arrives as a string from `input()`."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
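# Example (the function deliberately keeps terms as strings rather than floats):
#
#   >>> harmonic_series("5")
#   ['1', '1/2', '1/3', '1/4', '1/5']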
"""Shared utilities for Flax-based diffusion schedulers."""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for a scheduler's step-function output."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common functions for the schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state
    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Returns all schedulers that are compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Create a beta schedule that discretizes the cosine alpha_bar function, which
    defines the cumulative product of (1 - beta) over time from t = [0, 1]."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
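# The helpers above implement the standard DDPM forward process: given cumulative
# products alpha_bar_t, a noisy sample is
#
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
#
# A minimal sketch of calling them directly (shapes and timesteps are illustrative):
#
#   import jax.numpy as jnp
#
#   betas = betas_for_alpha_bar(1000)
#   alphas = 1.0 - betas
#   state = CommonSchedulerState(alphas=alphas, betas=betas,
#                                alphas_cumprod=jnp.cumprod(alphas, axis=0))
#   x0 = jnp.zeros((2, 4, 8, 8))
#   eps = jnp.ones_like(x0)
#   x_t = add_noise_common(state, x0, eps, jnp.array([10, 500]))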
"""Fine-tuning the library models for TAPEX on table-based fact verification tasks
(adapted from the text-classification run_glue.py example)."""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # rows are separated by "\n", cells within a row by "#"
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
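# Example launch (hedged; paths and hyperparameters are placeholders -- see the TAPEX
# README under transformers/examples/research_projects/tapex for the reference recipe):
#
#   python run_tabfact.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact \
#       --do_train --do_eval \
#       --output_dir ./tapex-tabfact \
#       --per_device_train_batch_size 8 \
#       --max_seq_length 1024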
"""Greedy approximation of minimum vertex cover: repeatedly pick the highest-degree vertex."""
import heapq


def greedy_min_vertex_cover(graph: dict) -> set:
    """
    Greedy APX-Algorithm for minimum vertex cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
"""Tests for the TensorFlow Flaubert model classes."""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[str] = config_and_inputs
lowercase_ : Any = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class _UpperCAmelCase ( snake_case , snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
__lowerCamelCase: Any = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__lowerCamelCase: Dict = (
{
'feature-extraction': TFFlaubertModel,
'fill-mask': TFFlaubertWithLMHeadModel,
'question-answering': TFFlaubertForQuestionAnsweringSimple,
'text-classification': TFFlaubertForSequenceClassification,
'token-classification': TFFlaubertForTokenClassification,
'zero-shot': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
__lowerCamelCase: List[str] = False
__lowerCamelCase: Dict = False
def lowerCAmelCase__ ( self : Optional[int] , a : Optional[Any] , a : Tuple , a : List[str] , a : Tuple , a : Optional[int] ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework.)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
return True
return False
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Optional[int] = TFFlaubertModelTester(self )
lowercase_ : str = ConfigTester(self , config_class=a , emb_dim=3_7 )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*a )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*a )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = TFFlaubertModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
lowercase_ : Tuple = tf.convert_to_tensor(
[[0, 1_5_8, 7_3_5, 2_5_9_2, 1_4_2_4, 6_7_2_7, 8_2, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
lowercase_ : str = model(a )[0]
lowercase_ : Optional[Any] = tf.TensorShape((1, 8, 5_1_2) )
self.assertEqual(output.shape , a )
# compare the actual values for a slice.
lowercase_ : Dict = tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipeline_utils import DiffusionPipeline` temporarily keeps working.
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
UpperCamelCase__ = False
class _UpperCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Tuple = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Optional[Any] = "A painting of a squirrel eating a burger "
lowercase_ : Dict = torch.manual_seed(0 )
lowercase_ : Any = pipe(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase_ : Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : List[str] = generator.manual_seed(0 )
lowercase_ : Tuple = pipe(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : str = "A painting of a squirrel eating a burger "
lowercase_ : List[str] = torch.manual_seed(0 )
lowercase_ : int = pipe(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="numpy" ).images
lowercase_ : List[Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowercase_ : str = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Tuple = XLNetConfig.from_json_file(_UpperCamelCase )
lowercase_ : Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowercase_ : Dict = finetuning_task
lowercase_ : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase_ : Any = XLNetForSequenceClassification(_UpperCamelCase )
elif "squad" in finetuning_task:
lowercase_ : Optional[int] = finetuning_task
lowercase_ : Optional[int] = XLNetForQuestionAnswering(_UpperCamelCase )
else:
lowercase_ : Union[str, Any] = XLNetLMHeadModel(_UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
lowercase_ : Optional[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCamelCase )}""" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCamelCase )}""" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
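# Example invocation (script name and all paths are hypothetical):
#
#   python convert_xlnet_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#       --xlnet_config_file /tmp/xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#       --pytorch_dump_folder_path /tmp/xlnet-large-cased \
#       --finetuning_task sts-b
#
# With --finetuning_task sts-b the script builds an XLNetForSequenceClassification
# head with GLUE_TASKS_NUM_LABELS["sts-b"] = 1 label (a regression task); omitting
# the flag falls back to the plain XLNetLMHeadModel branch above.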
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
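# Minimal sketch of the lazy-import pattern used above (a generic illustration,
# not the real transformers `_LazyModule`): only names are recorded at import
# time, and the owning submodule is imported on first attribute access, so
# importing the package stays cheap even when heavy optional backends are
# installed.
import importlib
class _LazyDemoModule:
    def __init__(self, package: str, import_structure: dict):
        self._package = package
        self._structure = import_structure  # {submodule: [exported names]}
    def __getattr__(self, attr: str):
        for submodule, names in self._structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self._package)
                return getattr(module, attr)
        raise AttributeError(attr)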
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
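# Self-contained sketch of the bit trick used above (`_is_power_of_two_demo` is
# a hypothetical helper for illustration): a power of two has exactly one set
# bit, so clearing its lowest set bit with n & (n - 1) yields zero. Note that
# the check above also accepts 0.
def _is_power_of_two_demo(n: int) -> bool:
    return n & (n - 1) == 0
assert [_is_power_of_two_demo(n) for n in (0, 1, 2, 3, 4, 8, 12)] == [True, True, True, False, True, True, False]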
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'gpt_neox_japanese'
def __init__( self : List[str] , a : List[Any]=3_2_0_0_0 , a : Union[str, Any]=2_5_6_0 , a : Optional[Any]=3_2 , a : Any=3_2 , a : str=4 , a : Optional[int]="gelu" , a : Optional[Any]=1.00 , a : Dict=1_0_0_0_0 , a : List[Any]=2_0_4_8 , a : Dict=0.02 , a : int=1e-5 , a : Optional[int]=True , a : Union[str, Any]=3_1_9_9_6 , a : List[Any]=3_1_9_9_9 , a : List[str]=0.1 , a : Dict=0.0 , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=a , eos_token_id=a , **a )
lowercase_ : int = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : List[Any] = intermediate_multiple_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[int] = rotary_pct
lowercase_ : Tuple = rotary_emb_base
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : List[str] = use_cache
lowercase_ : Any = attention_dropout
lowercase_ : List[Any] = hidden_dropout
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCamelCase__ = 50003
UpperCamelCase__ = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[int] = PLBartTokenizer
__lowerCamelCase: Any = None
__lowerCamelCase: Dict = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : str = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
self.assertListEqual(
a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : List[Any] = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
__lowerCamelCase: Tuple = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__lowerCamelCase: List[str] = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__lowerCamelCase: List[str] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase__ ( cls : str ):
'''simple docstring'''
lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
lowercase_ : List[str] = 1
return cls
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertIn(a , self.tokenizer.all_special_ids )
lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , a )
lowercase_ : Tuple = 1_0
lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = tempfile.mkdtemp()
lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(a , a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
lowercase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
lowercase_ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
lowercase_ : Dict = targets["input_ids"]
lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
UpperCamelCase__ = _symbol_database.Default()
UpperCamelCase__ = _descriptor_pool.Default().AddSerializedFile(
    b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
UpperCamelCase__ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
UpperCamelCase__ = None
UpperCamelCase__ = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
UpperCamelCase__ = 45
UpperCamelCase__ = 1581
UpperCamelCase__ = 1517
UpperCamelCase__ = 1570
UpperCamelCase__ = 1584
UpperCamelCase__ = 1793
UpperCamelCase__ = 1795
UpperCamelCase__ = 1916
UpperCamelCase__ = 1864
UpperCamelCase__ = 1905
UpperCamelCase__ = 1919
UpperCamelCase__ = 2429
UpperCamelCase__ = 2208
UpperCamelCase__ = 2418
UpperCamelCase__ = 2323
UpperCamelCase__ = 2407
# @@protoc_insertion_point(module_scope)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'linear'
__lowerCamelCase: Any = 'cosine'
__lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
__lowerCamelCase: Tuple = 'polynomial'
__lowerCamelCase: int = 'constant'
__lowerCamelCase: Optional[Any] = 'constant_with_warmup'
__lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Dict = step_rules.split("," )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ : Any = rule_str.split(":" )
lowercase_ : List[Any] = int(_UpperCamelCase )
lowercase_ : int = float(_UpperCamelCase )
lowercase_ : Optional[int] = value
lowercase_ : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase , _UpperCamelCase ):
def rule_func(_UpperCamelCase ) -> float:
lowercase_ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowercase_ : Optional[int] = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
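# Usage sketch for the rule string parsed above (values are hypothetical):
# "1:0.5,10:0.1,0.01" means a multiplier of 0.5 while step < 1, 0.1 while
# step < 10, and 0.01 for every later step; the effective learning rate is
# base_lr * multiplier. `_piecewise_demo` is a hypothetical helper that mirrors
# the parsing logic.
def _piecewise_demo(step_rules: str, step: int) -> float:
    *pairs, last = step_rules.split(",")
    rules = {}
    for pair in pairs:
        boundary, value = pair.split(":")
        rules[int(boundary)] = float(value)
    for boundary in sorted(rules):
        if step < boundary:
            return rules[boundary]
    return float(last)
# _piecewise_demo("1:0.5,10:0.1,0.01", 0)   -> 0.5
# _piecewise_demo("1:0.5,10:0.1,0.01", 5)   -> 0.1
# _piecewise_demo("1:0.5,10:0.1,0.01", 100) -> 0.01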
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
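# Numeric sketch of the decay formula above (all values hypothetical): with
# lr_init=1e-3, lr_end=1e-7, power=1.0, no warmup and num_training_steps=100,
# step 50 yields a multiplier of
#   ((1e-3 - 1e-7) * (1 - 50 / 100) ** 1.0 + 1e-7) / 1e-3 ≈ 0.50005,
# i.e. roughly half the initial learning rate, as expected for linear decay.
def _polynomial_decay_demo() -> float:
    lr_init, lr_end, power = 1e-3, 1e-7, 1.0
    pct_remaining = 1 - 50 / 100
    decay = (lr_init - lr_end) * pct_remaining**power + lr_end
    return decay / lr_init  # ≈ 0.50005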
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
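# Usage sketch (model, optimizer and step counts are hypothetical, and
# `get_scheduler` is the upstream name of the factory defined above): the
# factory maps a SchedulerType name to the matching helper and validates the
# arguments each schedule needs, so callers only touch one entry point, e.g.:
#
#   from torch.optim import AdamW
#   optimizer = AdamW(model.parameters(), lr=1e-4)
#   scheduler = get_scheduler(
#       "cosine", optimizer, num_warmup_steps=500, num_training_steps=10_000
#   )
#   ...
#   scheduler.step()  # called once per optimizer step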
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase__ = {
'roberta-base': 512,
'roberta-large': 512,
'roberta-large-mnli': 512,
'distilroberta-base': 512,
'roberta-base-openai-detector': 512,
'roberta-large-openai-detector': 512,
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase: Dict = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase: List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase: List[str] = ['input_ids', 'attention_mask']
__lowerCamelCase: List[Any] = RobertaTokenizer
def __init__( self : Optional[Any] , a : List[Any]=None , a : Optional[int]=None , a : Any=None , a : List[str]="replace" , a : str="<s>" , a : List[Any]="</s>" , a : Any="</s>" , a : str="<s>" , a : Any="<unk>" , a : Dict="<pad>" , a : Optional[int]="<mask>" , a : List[str]=False , a : int=True , **a : List[str] , ):
'''simple docstring'''
super().__init__(
a , a , tokenizer_file=a , errors=a , bos_token=a , eos_token=a , sep_token=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , add_prefix_space=a , trim_offsets=a , **a , )
lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a ) != add_prefix_space:
lowercase_ : Tuple = getattr(a , pre_tok_state.pop("type" ) )
lowercase_ : str = add_prefix_space
lowercase_ : List[str] = pre_tok_class(**a )
lowercase_ : Any = add_prefix_space
lowercase_ : Dict = "post_processor"
lowercase_ : List[Any] = getattr(self.backend_tokenizer , a , a )
if tokenizer_component_instance:
lowercase_ : Any = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowercase_ : Dict = tuple(state["sep"] )
if "cls" in state:
lowercase_ : Optional[Any] = tuple(state["cls"] )
lowercase_ : Optional[int] = False
if state.get("add_prefix_space" , a ) != add_prefix_space:
lowercase_ : List[Any] = add_prefix_space
lowercase_ : Tuple = True
if state.get("trim_offsets" , a ) != trim_offsets:
lowercase_ : Optional[Any] = trim_offsets
lowercase_ : Dict = True
if changes_to_apply:
lowercase_ : Union[str, Any] = getattr(a , state.pop("type" ) )
lowercase_ : Union[str, Any] = component_class(**a )
setattr(self.backend_tokenizer , a , a )
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase__ ( self : Tuple , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else value
lowercase_ : Optional[int] = value
def lowerCAmelCase__ ( self : List[Any] , *a : Optional[Any] , **a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : List[Any] = kwargs.get("is_split_into_words" , a )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a , **a )
def lowerCAmelCase__ ( self : Union[str, Any] , *a : str , **a : str ):
'''simple docstring'''
lowercase_ : int = kwargs.get("is_split_into_words" , a )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a , **a )
def lowerCAmelCase__ ( self : Dict , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowercase_ : Any = self._tokenizer.model.save(a , name=a )
return tuple(a )
def lowerCAmelCase__ ( self : List[str] , a : Any , a : Optional[int]=None ):
'''simple docstring'''
lowercase_ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase__ ( self : Optional[int] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ : Any = [self.sep_token_id]
lowercase_ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
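# Layout sketch for the two methods above (`_special_tokens_demo` is a
# hypothetical helper; 0 and 2 are RoBERTa's <s> and </s> ids):
#   single sequence: <s> A </s>
#   pair:            <s> A </s> </s> B </s>
# The token type ids are all zeros in both cases, since RoBERTa has no segment
# embeddings.
def _special_tokens_demo(tokens_a, tokens_b=None):
    bos, eos = 0, 2
    out = [bos] + tokens_a + [eos]
    if tokens_b is not None:
        out += [eos] + tokens_b + [eos]
    return out
assert _special_tokens_demo([31414, 232]) == [0, 31414, 232, 2]
assert _special_tokens_demo([31414], [2362]) == [0, 31414, 2, 2, 2362, 2]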
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 200 ):
"""simple docstring"""
lowercase_ : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 200]
lowercase_ : str = [0] * (pence + 1)
lowercase_ : Dict = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
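# Worked example of the bottom-up count above (`_count_ways_demo` is a
# hypothetical helper): with coins [1, 2, 5] there are exactly 4 ways to make
# 5 pence: 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1.
def _count_ways_demo(coins, total):
    ways = [1] + [0] * total  # one way to make 0 pence: use no coins
    for coin in coins:
        for amount in range(coin, total + 1):
            ways[amount] += ways[amount - coin]
    return ways[total]
assert _count_ways_demo([1, 2, 5], 5) == 4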
'''simple docstring'''
import sys
UpperCamelCase__ = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = N ):
"""simple docstring"""
lowercase_ : str = -sys.maxsize - 1
for i in range(len(_UpperCamelCase ) - 12 ):
lowercase_ : List[str] = 1
for j in range(13 ):
product *= int(n[i + j] )
if product > largest_product:
lowercase_ : int = product
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowercase_ : str = field
lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
lowercase_ : Any = Json(
cache_dir=a , data_files=a , features=a , field=a , **a , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if self.streaming:
lowercase_ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase_ : Dict = None
lowercase_ : Optional[int] = None
lowercase_ : str = None
lowercase_ : str = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowercase_ : int = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
def __init__( self : str , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
lowercase_ : Dict = dataset
lowercase_ : Optional[int] = path_or_buf
lowercase_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase_ : Optional[Any] = num_proc
lowercase_ : List[Any] = "utf-8"
lowercase_ : List[str] = to_json_kwargs
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
lowercase_ : Dict = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
lowercase_ : Dict = self._write(
file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
return written
def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = args
lowercase_ : Optional[int] = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase_ : Dict = batch.to_pandas().to_json(
path_or_buf=a , orient=a , lines=a , index=a , **a )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase__ ( self : int , a : BinaryIO , a : int , a : str , a : Union[str, Any] , **a : str , ):
'''simple docstring'''
lowercase_ : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
lowercase_ : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(a )
else:
lowercase_ , lowercase_ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(a )
return written
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase_ : List[str] = update_area_of_max_square(_UpperCamelCase , col + 1 )
lowercase_ : List[Any] = update_area_of_max_square(row + 1 , col + 1 )
lowercase_ : Tuple = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Optional[int] = 1 + min([right, diagonal, down] )
lowercase_ : Any = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
lowercase_ : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase_ : Dict = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
lowercase_ : str = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
lowercase_ : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Tuple = 1 + min([right, diagonal, down] )
lowercase_ : int = max(largest_square_area[0] , _UpperCamelCase )
lowercase_ : Dict = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase_ : Any = [0]
lowercase_ : Optional[int] = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase_ : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Tuple = dp_array[row][col + 1]
lowercase_ : List[str] = dp_array[row + 1][col + 1]
lowercase_ : List[Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase_ : Any = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = max(dp_array[row][col] , _UpperCamelCase )
else:
lowercase_ : int = 0
return largest_square_area
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = [0] * (cols + 1)
lowercase_ : Union[str, Any] = [0] * (cols + 1)
lowercase_ : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Dict = current_row[col + 1]
lowercase_ : List[Any] = next_row[col + 1]
lowercase_ : Tuple = next_row[col]
if mat[row][col] == 1:
lowercase_ : Dict = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : int = max(current_row[col] , _UpperCamelCase )
else:
lowercase_ : Tuple = 0
        lowercase_ : Optional[Any] = current_row[:]  # copy: aliasing next_row to current_row would corrupt the diagonal read on the next row
return largest_square_area
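# The two bottom-up variants above share one recurrence: the side of the largest
# all-ones square anchored at (row, col) is 1 + min(right, diagonal, down). The
# last variant keeps only two rolling rows, cutting extra space from
# O(rows * cols) to O(cols); copying current_row (rather than aliasing it) keeps
# the diagonal read pointing at the row below. A minimal standalone sketch of
# the same recurrence, kept separate because the definitions above are renamed:
def _demo_largest_square(rows, cols, mat):
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            if mat[row][col] == 1:
                dp[row][col] = 1 + min(dp[row][col + 1], dp[row + 1][col + 1], dp[row + 1][col])
                best = max(best, dp[row][col])
    return best


assert _demo_largest_square(2, 2, [[1, 1], [1, 1]]) == 2
assert _demo_largest_square(2, 2, [[1, 1], [1, 0]]) == 1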
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 640
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
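# A hypothetical usage sketch, assuming the processor above is the one exposed
# by transformers as `Pix2StructProcessor` and that the referenced checkpoint
# is available; not a definitive invocation.
#
#   from transformers import Pix2StructProcessor
#   from PIL import Image
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=Image.open("chart.png"), return_tensors="pt")
#   # non-VQA checkpoints return `flattened_patches` and `attention_mask`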
| 640
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
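# Standalone mirror of the tolerance helper above (referred to as
# `is_apercent_close` in the test body): a value passes when it lies within 1%
# of the target.
def _within_one_percent(source, target):
    return (abs(source - target) / target) < 0.01


assert _within_one_percent(1_009, 1_000)
assert not _within_one_percent(1_011, 1_000)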
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
lowercase_ : int = TestCommand(*_UpperCamelCase )
test_command.run()
lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
assert os.path.exists(_UpperCamelCase )
lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
lowercase_ : Optional[int] = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
if key == "num_bytes":
assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
elif key == "splits":
assert list(_UpperCamelCase ) == list(_UpperCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 640
| 1
|
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowercase_ : List[Any] = b * b - 4 * a * c
lowercase_ : Tuple = (-b + sqrt(_UpperCamelCase )) / (2 * a)
lowercase_ : Dict = (-b - sqrt(_UpperCamelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
        root_b.real if not root_b.imag else root_b,
)
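# Worked check of the closed form above, kept standalone because the function is
# referred to as `quadratic_roots` only below: for x**2 - 3x + 2 = 0 the
# discriminant is (-3)**2 - 4*1*2 = 1, so the roots are (3 +/- 1) / 2 = 2 and 1.
_b_demo, _disc_demo = -3, 1
assert (-_b_demo + sqrt(_disc_demo)) / 2 == 2
assert (-_b_demo - sqrt(_disc_demo)) / 2 == 1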
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ , lowercase_ : int = quadratic_roots(a=5 , b=6 , c=1 )
print(F"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
| 640
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[str] = create_inputs(self.tool.inputs )
lowercase_ : List[str] = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = create_inputs(self.tool.inputs )
lowercase_ : str = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : Optional[int] = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Any = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : Any = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
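# Quick sanity check mirroring the helper above (known as `get_new_h_w` where it
# is called): 768 is divisible by 8**2, so a 768x768 request maps to the
# latent-aligned (768 // 64) * 8 == 96 on each side.
assert (768 // 8**2) * 8 == 96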
class _UpperCAmelCase ( snake_case ):
def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
'''simple docstring'''
if latents is None:
lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : str = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 1
|
'''simple docstring'''
from datetime import datetime
import requests
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
lowercase_ : List[Any] = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
return requests.get(_UpperCamelCase ).content
if __name__ == "__main__":
UpperCamelCase__ = input('Enter Video/IGTV url: ').strip()
UpperCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f"""Done. Video saved to disk as {file_name}.""")
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
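# Sanity check mirroring `downscale_height_and_width` above: 770 is not
# divisible by 8**2, so the dimension is rounded up to the next aligned size,
# (770 // 64 + 1) * 8 == 104.
assert (770 // 8**2 + 1) * 8 == 104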
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=512 , _UpperCamelCase=512 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : str = np.array(pil_image.convert("RGB" ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
lowercase_ : int = np.transpose(_UpperCamelCase , [2, 0, 1] )
lowercase_ : str = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
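# Standalone check mirroring `prepare_image` above: a black 512x512 RGB image
# becomes a (1, 3, 512, 512) tensor with every value scaled to -1.
_demo_pixels = np.zeros((512, 512, 3), dtype=np.float32) / 127.5 - 1
_demo_tensor = torch.from_numpy(np.transpose(_demo_pixels, [2, 0, 1])).unsqueeze(0)
assert _demo_tensor.shape == (1, 3, 512, 512)
assert float(_demo_tensor.min()) == -1.0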
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _UpperCAmelCase :
def __init__( self : Dict , a : str , a : str=2 , a : Optional[Any]=3_2 , a : int=1_6 , a : Dict=3 , a : Tuple=True , a : Any=True , a : List[Any]=3_2 , a : Optional[int]=4 , a : str=[0, 1, 2, 3] , a : Optional[int]=4 , a : List[Any]=3_7 , a : Dict="gelu" , a : Optional[int]=0.1 , a : Dict=0.1 , a : Optional[Any]=0.02 , a : List[Any]=3 , a : int=[1, 3_8_4, 2_4, 2_4] , a : Any=True , a : Union[str, Any]=None , ):
'''simple docstring'''
lowercase_ : Dict = parent
lowercase_ : Any = batch_size
lowercase_ : Optional[int] = image_size
lowercase_ : Dict = patch_size
lowercase_ : int = num_channels
lowercase_ : int = is_training
lowercase_ : List[str] = use_labels
lowercase_ : Optional[Any] = hidden_size
lowercase_ : Optional[Any] = num_hidden_layers
lowercase_ : Any = backbone_out_indices
lowercase_ : int = num_attention_heads
lowercase_ : List[Any] = intermediate_size
lowercase_ : List[str] = hidden_act
lowercase_ : str = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : List[Any] = initializer_range
lowercase_ : Optional[int] = num_labels
lowercase_ : Tuple = backbone_featmap_shape
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
lowercase_ : Optional[int] = (image_size // patch_size) ** 2
lowercase_ : List[str] = num_patches + 1
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : str = None
if self.use_labels:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Tuple = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [9_6, 1_9_2, 3_8_4, 7_6_8],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a , backbone_featmap_shape=self.backbone_featmap_shape , )
def lowerCAmelCase__ ( self : int , a : Optional[int] , a : Optional[Any] , a : Tuple ):
'''simple docstring'''
lowercase_ : Union[str, Any] = DPTModel(config=a )
model.to(a )
model.eval()
lowercase_ : int = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : Tuple , a : Any , a : Union[str, Any] , a : str ):
'''simple docstring'''
lowercase_ : Tuple = self.num_labels
lowercase_ : List[Any] = DPTForDepthEstimation(a )
model.to(a )
model.eval()
lowercase_ : Any = model(a )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self : Union[str, Any] , a : Dict , a : Union[str, Any] , a : int ):
'''simple docstring'''
lowercase_ : Optional[int] = self.num_labels
lowercase_ : Any = DPTForSemanticSegmentation(a )
model.to(a )
model.eval()
lowercase_ : Tuple = model(a , labels=a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( snake_case , snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__lowerCamelCase: Optional[int] = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase: Tuple = False
__lowerCamelCase: Tuple = False
__lowerCamelCase: List[Any] = False
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : List[Any] = DPTModelTester(self )
lowercase_ : str = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Tuple = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(a )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Tuple = [*signature.parameters.keys()]
lowercase_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = True
if model_class in get_values(a ):
continue
lowercase_ : Union[str, Any] = model_class(a )
model.to(a )
model.train()
lowercase_ : Any = self._prepare_for_class(a , a , return_labels=a )
lowercase_ : str = model(**a ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Optional[int] = False
lowercase_ : List[str] = True
if model_class in get_values(a ) or not model_class.supports_gradient_checkpointing:
continue
lowercase_ : Dict = model_class(a )
model.to(a )
model.gradient_checkpointing_enable()
model.train()
lowercase_ : Tuple = self._prepare_for_class(a , a , return_labels=a )
lowercase_ : List[str] = model(**a ).loss
loss.backward()
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = _config_zero_init(a )
for model_class in self.all_model_classes:
lowercase_ : str = model_class(config=a )
# Skip the check for the backbone
lowercase_ : Any = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
lowercase_ : Dict = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
lowercase_ : str = DPTModel.from_pretrained(a )
self.assertIsNotNone(a )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : str = "add"
with self.assertRaises(a ):
lowercase_ : Optional[Any] = DPTForDepthEstimation(a )
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
lowercase_ : Optional[Any] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a )
lowercase_ : Optional[int] = prepare_img()
lowercase_ : List[str] = image_processor(images=a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
lowercase_ : Optional[int] = model(**a )
lowercase_ : Tuple = outputs.predicted_depth
# verify the predicted depth
lowercase_ : List[Any] = torch.Size((1, 3_8_4, 3_8_4) )
self.assertEqual(predicted_depth.shape , a )
lowercase_ : int = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , a , atol=1e-4 ) )
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 640
| 1
|
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = tf.convert_to_tensor(_UpperCamelCase )
lowercase_ : int = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : int = tf.convert_to_tensor(_UpperCamelCase )
lowercase_ : str = tf.cast(math.pi , x.dtype )
lowercase_ : int = tf.cast(0.044715 , x.dtype )
lowercase_ : List[str] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_UpperCamelCase , 3 )) ))
return x * cdf
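# The two variants above implement, respectively, the exact GELU
#     gelu(x) = x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
# and the tanh approximation popularised by the GPT-2 codebase
#     gelu_new(x) ~= 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))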
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = tf.convert_to_tensor(_UpperCamelCase )
return x * tf.tanh(tf.math.softplus(_UpperCamelCase ) )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = tf.convert_to_tensor(_UpperCamelCase )
lowercase_ : Optional[int] = tf.cast(0.044715 , x.dtype )
lowercase_ : Any = tf.cast(0.7978845608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = tf.convert_to_tensor(_UpperCamelCase )
lowercase_ : List[Any] = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return tf.clip_by_value(_gelu(_UpperCamelCase ) , -10 , 10 )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ , lowercase_ : Tuple = tf.split(_UpperCamelCase , 2 , axis=_UpperCamelCase )
return a * tf.math.sigmoid(_UpperCamelCase )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return tf.keras.activations.gelu(_UpperCamelCase , approximate=_UpperCamelCase )
UpperCamelCase__ = tf.keras.activations.gelu
UpperCamelCase__ = approximate_gelu_wrap
else:
UpperCamelCase__ = _gelu
UpperCamelCase__ = _gelu_new
UpperCamelCase__ = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(F"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
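# A usage sketch, assuming the mapping above is bound to the name `ACTaFN` as
# the lookup in the function suggests: activations are resolved by string and
# applied elementwise, e.g.
#
#   act = ACTaFN["gelu"]
#   act(tf.constant([-1.0, 0.0, 1.0]))  # -> approximately [-0.1587, 0.0, 0.8413]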
| 640
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
"""BertAbs configuration"""
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
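
# Illustrative usage (ours, not part of the original module): build a config
# and override one decoder hyperparameter, as with any PretrainedConfig.
if __name__ == "__main__":
    config = BertAbsConfig(dec_layers=8)
    print(config.dec_layers)  # -> 8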
"""Find the next greatest element (NGE) for each element of an array."""
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the NGE for all elements in the list: the first element after the
    current one that is greater than it, or -1 if none exists. O(n^2) brute force.
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow(), but uses enumerate() for the outer
    loop and iterates over a slice of arr for the inner loop.
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for all elements in the list using a monotonic stack, in O(n)."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "    next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
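
# Quick self-check (an illustrative addition, not part of the original module):
# all three implementations should agree with the hand-computed `expect` list.
if __name__ == "__main__":
    assert next_greatest_element_slow(arr) == expect
    assert next_greatest_element_fast(arr) == expect
    assert next_greatest_element(arr) == expect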
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """
        The multiplier, increment and modulo are saved and used each time
        next_number() is called; modulo is the exclusive upper bound of the output.
        """
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Return the next number in the sequence, in [0, modulo)."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
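
# Usage sketch (ours, not part of the original module): map the raw stream to
# a bounded range. The modulo mapping is slightly biased whenever `upper` does
# not divide the generator's modulus evenly.
def lcg_randint(lcg: LinearCongruentialGenerator, upper: int) -> int:
    """Return a pseudorandom int in [0, upper)."""
    return lcg.next_number() % upper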
"""GPTNeoX Japanese model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )


class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
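
# Note (ours): the @slow tests above only run when RUN_SLOW=1 is exported,
# e.g. (the test path below is illustrative):
#   RUN_SLOW=1 python -m pytest tests/models/deberta_v2/test_modeling_tf_deberta_v2.py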
"""Prim's algorithm: determine a minimum spanning tree (MST) of a weighted graph."""
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        """Add a pointer to a vertex at the neighbors list."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Store the weight of the edge to the destination vertex."""
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan, runs in O(n^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a min heap, runs in O((m + n) log n)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    """Stub kept for doctest discovery."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
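
# Usage sketch (ours, not part of the original module): a weighted triangle;
# both implementations should pick the two unit-weight edges for the MST.
if __name__ == "__main__":
    g = [Vertex(i) for i in range(3)]
    connect(g, 1, 2, 1)
    connect(g, 2, 3, 1)
    connect(g, 1, 3, 5)
    print(prim(g, g[0]))             # [(2, 1), (3, 2)]
    print(list(prim_heap(g, g[0])))  # [(2, 1), (3, 2)]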
"""Convert InstructBLIP checkpoints from the original repository."""
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
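
# Example invocation (illustrative, ours; the script filename is assumed):
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl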
"""Odd-even sort. For reference: https://en.wikipedia.org/wiki/Odd%E2%80%93even_sort"""


def odd_even_sort(input_list: list) -> list:
    """Sort in ascending order using the odd-even transposition scheme.

    >>> odd_even_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> odd_even_sort([])
    []
    """
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
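
# Sanity check (an illustrative addition, not part of the original module):
# the result should always match Python's built-in sort on a copy of the data.
if __name__ == "__main__":
    sample = [5, 3, -1, 0, 9, 2]
    assert odd_even_sort(sample[:]) == sorted(sample)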
"""Multi-heuristic A* search on a grid with obstacles."""
import heapq
import sys

import numpy as np

TPos = tuple[int, int]


class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_2(p: TPos, goal: TPos):
    # integer division by the time variable
    return consistent_heuristic(p, goal) // t


def heuristic_1(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
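
# Note (ours): W1 scales the heuristics inside key() and W2 bounds how far the
# inadmissible searches may lag behind the anchor queue; with W1 = W2 = 1 the
# anchor search reduces to plain A* under the consistent heuristic.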
"""Find the bridges of an undirected graph: edges whose removal increases the
number of connected components."""


def get_demo_graph(index):
    """Return one of four hard-coded demo graphs for quick experiments."""
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)]."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
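
# Illustrative check (ours): on demo graph 0 the bridges are the edges whose
# removal disconnects the graph - e.g. (3, 4), (2, 3) and (2, 5).
if __name__ == "__main__":
    print(compute_bridges(get_demo_graph(0)))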
"""Callbacks for seq2seq training with pytorch-lightning."""
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint callback keyed on the validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
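
# Example wiring (illustrative, ours): both factories plug straight into a
# pytorch_lightning.Trainer, e.g.
#   trainer = pl.Trainer(
#       callbacks=[
#           get_checkpoint_callback(output_dir, "rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ],
#   )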
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
"""Common utilities for Flax diffusion schedulers."""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for a Flax scheduler's step outputs."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common functions for Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
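
# Minimal usage sketch (ours, not part of the original module): build a
# 10-step cosine beta schedule directly; requires JAX to be installed.
if __name__ == "__main__":
    demo_betas = betas_for_alpha_bar(10)
    print(demo_betas.shape, float(demo_betas[0]))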
"""Project Euler Problem 144: https://projecteuler.net/problem=144
Count the reflections of a laser beam inside the ellipse y^2 + 4x^2 = 100
before it escapes through the small gap at the top."""
from math import isclose, sqrt


def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    """Given a point on the ellipse and the gradient of the incoming beam,
    return the next reflection point and the outgoing gradient."""
    # normal_gradient = gradient of the normal at the reflection point
    normal_gradient = point_y / 4 / point_x
    # sin(2 theta) and cos(2 theta) for the rotation induced by the reflection
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Return the number of times the beam hits the inner surface of the
    ellipse before exiting through the gap at the top (-0.01 <= x <= 0.01)."""
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
"""Greedy approximation algorithm for the minimum vertex cover problem."""
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX algorithm for min vertex cover on a graph stored as an
    adjacency list keyed by integer vertices.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
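
# Added for illustration (ours): verify that a returned set really covers
# every edge of the graph; greedy_min_vertex_cover always returns a cover,
# though not necessarily a minimum one.
def is_vertex_cover(graph: dict, cover: set) -> bool:
    return all(u in cover or v in cover for u in graph for v in graph[u])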
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
__lowerCamelCase: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase: int = ['input_ids', 'attention_mask']
__lowerCamelCase: Tuple = None
def __init__( self : List[str] , a : Tuple=None , a : Dict=None , a : Union[str, Any]=None , a : List[Any]="<unk>" , a : Tuple="<s>" , a : str="</s>" , a : List[str]="<pad>" , a : int=False , a : int=False , **a : Optional[int] , ):
'''simple docstring'''
super().__init__(
a , a , tokenizer_file=a , unk_token=a , bos_token=a , eos_token=a , pad_token=a , add_prefix_space=a , clean_up_tokenization_spaces=a , **a , )
lowercase_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a ) != add_prefix_space:
lowercase_ : Union[str, Any] = getattr(a , pre_tok_state.pop("type" ) )
lowercase_ : List[Any] = add_prefix_space
lowercase_ : List[str] = pre_tok_class(**a )
lowercase_ : int = add_prefix_space
def lowerCAmelCase__ ( self : Optional[Any] , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = kwargs.get("is_split_into_words" , a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._batch_encode_plus(*a , **a )
def lowerCAmelCase__ ( self : Optional[Any] , *a : Union[str, Any] , **a : Tuple ):
'''simple docstring'''
lowercase_ : str = kwargs.get("is_split_into_words" , a )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
" pretokenized inputs." )
return super()._encode_plus(*a , **a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self._tokenizer.model.save(a , name=a )
return tuple(a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : "Conversation" ):
'''simple docstring'''
lowercase_ : int = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(a , add_special_tokens=a ) + [self.eos_token_id] )
if len(a ) > self.model_max_length:
lowercase_ : Optional[int] = input_ids[-self.model_max_length :]
return input_ids
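# A minimal usage sketch (an illustrative assumption, not taken from this file):
# if the class above mirrors transformers' BloomTokenizerFast, pre-tokenized
# input is only accepted when the tokenizer was built with add_prefix_space=True,
# which is exactly what the two guards above enforce.
if __name__ == "__main__":
    from transformers import BloomTokenizerFast

    tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m", add_prefix_space=True)
    # With the default add_prefix_space=False this call would raise the Exception above.
    print(tok(["Hello", "world"], is_split_into_words=True).input_ids)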
| 640
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working.
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 640
| 1
|
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : list[list] = []
# For each node, push its rank together with the (node, adjacency list) pair onto the queue.
# Using the heapq module, the queue is filled like a priority queue;
# heapq implements a min-heap, so -1 * len(v) is used to simulate a max-heap keyed on degree.
for key, value in graph.items():
# O(log(n))
heapq.heappush(_UpperCamelCase , [-1 * len(_UpperCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase_ : Optional[Any] = set()
# while the queue isn't empty and there are still edges
# (queue[0][0] is the negated degree of the highest-degree node; 0 means no edges remain)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase_ : Any = heapq.heappop(_UpperCamelCase )[1][0]
chosen_vertices.add(_UpperCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem,
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
lowercase_ : str = elem[1][1].index(_UpperCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_UpperCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Tuple = XLNetConfig.from_json_file(_UpperCamelCase )
lowercase_ : Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowercase_ : Dict = finetuning_task
lowercase_ : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase_ : Any = XLNetForSequenceClassification(_UpperCamelCase )
elif "squad" in finetuning_task:
lowercase_ : Optional[int] = finetuning_task
lowercase_ : Optional[int] = XLNetForQuestionAnswering(_UpperCamelCase )
else:
lowercase_ : Union[str, Any] = XLNetLMHeadModel(_UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
lowercase_ : Optional[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCamelCase )}""" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCamelCase )}""" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
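# Example invocation (script name and paths are illustrative assumptions; the
# argparse flags map one-to-one onto the conversion function above):
#
#   python convert_xlnet_checkpoint.py \
#       --tf_checkpoint_path ./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pt \
#       --finetuning_task sts-b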
| 640
| 1
|
'''simple docstring'''
import gc
import threading
import time
import psutil
import torch
class _UpperCAmelCase :
def __init__( self : Dict ):
'''simple docstring'''
lowercase_ : Tuple = psutil.Process()
lowercase_ : Union[str, Any] = False
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Tuple = -1
while True:
lowercase_ : List[Any] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Optional[int] = True
lowercase_ : Any = threading.Thread(target=self.peak_monitor )
lowercase_ : Optional[int] = True
self.thread.start()
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = False
self.thread.join()
return self.cpu_memory_peak
UpperCamelCase__ = PeakCPUMemory()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Tuple = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
lowercase_ : Dict = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
lowercase_ : Union[str, Any] = torch.cuda.memory_allocated(_UpperCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
lowercase_ : Any = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
lowercase_ : int = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
lowercase_ : List[str] = (torch.cuda.memory_allocated(_UpperCamelCase ) - start_measures[str(_UpperCamelCase )]) / 2**20
lowercase_ : Optional[Any] = (torch.cuda.max_memory_allocated(_UpperCamelCase ) - start_measures[str(_UpperCamelCase )]) / 2**20
return measures
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
print(F"""{description}:""" )
print(F"""- Time: {measures["time"]:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(_UpperCamelCase )]:.2f}MiB""" )
lowercase_ : List[str] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
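# Worked examples of the bit trick above: a power of two has exactly one set
# bit, and subtracting 1 clears it while setting all lower bits, so the AND is 0.
#   8 = 0b1000, 7 = 0b0111  ->  8 & 7 == 0   (power of two)
#   6 = 0b0110, 5 = 0b0101  ->  6 & 5 == 4   (not a power of two)
# Edge case: 0 & -1 == 0, so this predicate also reports 0 as a power of two.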
| 640
| 1
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 640
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCamelCase__ = 50003
UpperCamelCase__ = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[int] = PLBartTokenizer
__lowerCamelCase: Any = None
__lowerCamelCase: Dict = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : str = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
self.assertListEqual(
a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : List[Any] = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
__lowerCamelCase: Tuple = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__lowerCamelCase: List[str] = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__lowerCamelCase: List[str] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase__ ( cls : str ):
'''simple docstring'''
lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
lowercase_ : List[str] = 1
return cls
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertIn(a , self.tokenizer.all_special_ids )
lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , a )
lowercase_ : Tuple = 1_0
lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = tempfile.mkdtemp()
lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(a , a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
lowercase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
lowercase_ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
lowercase_ : Dict = targets["input_ids"]
lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
| 640
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , a : Dict , a : Optional[Any]=7 , a : List[Any]=3 , a : int=1_8 , a : Tuple=3_0 , a : Dict=4_0_0 , a : Dict=True , a : Optional[int]=None , a : Optional[int]=True , a : Any=None , a : List[str]=True , a : Tuple=[0.4814_5466, 0.457_8275, 0.4082_1073] , a : str=[0.2686_2954, 0.2613_0258, 0.2757_7711] , a : Tuple=True , ):
'''simple docstring'''
lowercase_ : Any = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
lowercase_ : List[Any] = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
lowercase_ : Optional[int] = parent
lowercase_ : Any = batch_size
lowercase_ : int = num_channels
lowercase_ : List[Any] = image_size
lowercase_ : Optional[Any] = min_resolution
lowercase_ : Optional[int] = max_resolution
lowercase_ : List[Any] = do_resize
lowercase_ : Dict = size
lowercase_ : List[str] = do_center_crop
lowercase_ : List[str] = crop_size
lowercase_ : Optional[int] = do_normalize
lowercase_ : Tuple = image_mean
lowercase_ : Optional[int] = image_std
lowercase_ : List[Any] = do_convert_rgb
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def lowerCAmelCase__ ( self : str , a : Optional[Any]=False , a : Optional[int]=False , a : str=False ):
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowercase_ : Union[str, Any] = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_5_5 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowercase_ : Tuple = []
for i in range(self.batch_size ):
lowercase_ , lowercase_ : List[str] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_5_5 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowercase_ : Optional[int] = [Image.fromarray(np.moveaxis(a , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowercase_ : str = [torch.from_numpy(a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Tuple = ChineseCLIPImageProcessingTester(self , do_center_crop=a )
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "do_center_crop" ) )
self.assertTrue(hasattr(a , "center_crop" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_convert_rgb" ) )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 2_2_4, "width": 2_2_4} )
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8} )
lowercase_ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2} )
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4} )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ : int = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ : int = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ : List[str] = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = ChineseCLIPImageProcessor if is_vision_available() else None
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=a )
lowercase_ : Any = 3
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , "do_resize" ) )
self.assertTrue(hasattr(a , "size" ) )
self.assertTrue(hasattr(a , "do_center_crop" ) )
self.assertTrue(hasattr(a , "center_crop" ) )
self.assertTrue(hasattr(a , "do_normalize" ) )
self.assertTrue(hasattr(a , "image_mean" ) )
self.assertTrue(hasattr(a , "image_std" ) )
self.assertTrue(hasattr(a , "do_convert_rgb" ) )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : int = self.image_processor_tester.prepare_inputs(equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase_ : Dict = image_processing(a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 640
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
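# Runtime behaviour of the lazy pattern above (a sketch; it assumes the
# obfuscated dict at the top of the file is the usual _import_structure map):
# attribute access on the module triggers the real submodule import, e.g.
#   from transformers import EncodecModel   # imports modeling_encodec on first touch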
| 640
| 1
|
'''simple docstring'''
from collections.abc import Generator
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ , lowercase_ : Optional[Any] = 0, 1
while True:
lowercase_ , lowercase_ : Tuple = b, a + b
yield b
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 1000 ):
"""simple docstring"""
lowercase_ : Optional[Any] = 1
lowercase_ : Optional[int] = fibonacci_generator()
while len(str(next(_UpperCamelCase ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
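# Worked example (the public names are obfuscated; in the original they are
# fibonacci_generator() and solution(n)): for n = 3 the generator yields
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89 -- ten terms under three digits -- so the
# counter reaches 11; 144 then ends the loop and the final +1 yields index 12,
# matching F(12) = 144, the first Fibonacci number with three digits.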
| 640
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'linear'
__lowerCamelCase: Any = 'cosine'
__lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
__lowerCamelCase: Tuple = 'polynomial'
__lowerCamelCase: int = 'constant'
__lowerCamelCase: Optional[Any] = 'constant_with_warmup'
__lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Dict = step_rules.split("," )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ : Any = rule_str.split(":" )
lowercase_ : List[Any] = int(_UpperCamelCase )
lowercase_ : int = float(_UpperCamelCase )
lowercase_ : Optional[int] = value
lowercase_ : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase , _UpperCamelCase ):
def rule_func(_UpperCamelCase ) -> float:
lowercase_ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowercase_ : Optional[int] = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
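# Format of the step_rules string parsed above, inferred from the parsing logic
# (illustrative, e.g. "1:10,10:0.1,0.01"): each "step:multiple" pair applies its
# LR multiple while current_step < step, and the trailing bare number is the
# multiple used once every threshold has been passed.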
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
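# A minimal usage sketch, assuming this module is diffusers' optimization.py,
# whose public factory is get_scheduler (the name is obfuscated above):
def _demo_scheduler() -> None:
    import torch
    from diffusers.optimization import get_scheduler

    # A single dummy parameter is enough to drive an optimizer/scheduler pair.
    opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-4)
    sched = get_scheduler("cosine", opt, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        opt.step()
        sched.step()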
| 640
| 1
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = 1
__lowerCamelCase: List[Any] = 2
__lowerCamelCase: Optional[Any] = 3
__lowerCamelCase: int = 4
__lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
__lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
__lowerCamelCase: Optional[int] = ['dtype']
__lowerCamelCase: int = []
__lowerCamelCase: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = cls.load_config(
pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
lowercase_ : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
'''simple docstring'''
self.save_config(save_directory=a , push_to_hub=a , **a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
lowercase_ : Optional[Any] = [
getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
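# Quick property check for the cosine (squaredcos_cap_v2) betas above
# (hypothetical call; the original name is betas_for_alpha_bar): values are
# positive, capped at max_beta, and the array has one entry per timestep.
#   betas = betas_for_alpha_bar(1000)
#   assert betas.shape == (1000,) and float(betas.max()) <= 0.999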
@flax.struct.dataclass
class _UpperCAmelCase :
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = scheduler.config
if config.trained_betas is not None:
lowercase_ : Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase_ : List[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Union[str, Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
lowercase_ : str = 1.0 - betas
lowercase_ : Dict = jnp.cumprod(a , axis=0 )
return cls(
alphas=a , betas=a , alphas_cumprod=a , )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = state.alphas_cumprod
lowercase_ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowercase_ : int = sqrt_alpha_prod.flatten()
lowercase_ : Union[str, Any] = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
lowercase_ : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten()
lowercase_ : Any = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
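# Note for the two helpers above (notation follows the code): they implement the
# closed-form DDPM forward process and the v-prediction target,
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
#   v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
# where alpha_bar_t = prod_{s <= t} (1 - beta_s) is alphas_cumprod[t].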
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 200 ):
"""simple docstring"""
lowercase_ : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 200]
lowercase_ : str = [0] * (pence + 1)
lowercase_ : Dict = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
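# Worked mini-example for the bottom-up count above: with pence = 5 only the
# coins 1, 2 and 5 contribute, giving 4 combinations
# (1+1+1+1+1, 1+1+1+2, 1+2+2, 5), so solution(5) == 4.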
| 640
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase__ = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowercase_ : str = field
lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
lowercase_ : Any = Json(
cache_dir=a , data_files=a , features=a , field=a , **a , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if self.streaming:
lowercase_ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase_ : Dict = None
lowercase_ : Optional[int] = None
lowercase_ : str = None
lowercase_ : str = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowercase_ : int = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
def __init__( self : str , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
lowercase_ : Dict = dataset
lowercase_ : Optional[int] = path_or_buf
lowercase_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase_ : Optional[Any] = num_proc
lowercase_ : List[Any] = "utf-8"
lowercase_ : List[str] = to_json_kwargs
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
lowercase_ : Dict = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
lowercase_ : Dict = self._write(
file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
return written
def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = args
lowercase_ : Optional[int] = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase_ : Dict = batch.to_pandas().to_json(
path_or_buf=a , orient=a , lines=a , index=a , **a )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase__ ( self : int , a : BinaryIO , a : int , a : str , a : Union[str, Any] , **a : str , ):
'''simple docstring'''
lowercase_ : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
lowercase_ : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(a )
else:
lowercase_ , lowercase_ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(a )
return written
| 640
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
UpperCamelCase__ = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = "mumbai" ):
"""simple docstring"""
lowercase_ : int = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# Each "organicJob" div on the results page holds the specifics of one job listing
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
lowercase_ : Optional[int] = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
lowercase_ : Dict = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase_ : List[str] = update_area_of_max_square(_UpperCamelCase , col + 1 )
lowercase_ : List[Any] = update_area_of_max_square(row + 1 , col + 1 )
lowercase_ : Tuple = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Optional[int] = 1 + min([right, diagonal, down] )
lowercase_ : Any = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
lowercase_ : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase_ : Dict = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
lowercase_ : str = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
lowercase_ : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Tuple = 1 + min([right, diagonal, down] )
lowercase_ : int = max(largest_square_area[0] , _UpperCamelCase )
lowercase_ : Dict = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase_ : Any = [0]
lowercase_ : Optional[int] = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase_ : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Tuple = dp_array[row][col + 1]
lowercase_ : List[str] = dp_array[row + 1][col + 1]
lowercase_ : List[Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase_ : Any = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = max(dp_array[row][col] , _UpperCamelCase )
else:
lowercase_ : int = 0
return largest_square_area
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = [0] * (cols + 1)
lowercase_ : Union[str, Any] = [0] * (cols + 1)
lowercase_ : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Dict = current_row[col + 1]
lowercase_ : List[Any] = next_row[col + 1]
lowercase_ : Tuple = next_row[col]
if mat[row][col] == 1:
lowercase_ : Dict = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : int = max(current_row[col] , _UpperCamelCase )
else:
lowercase_ : Tuple = 0
lowercase_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
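# One more worked example (all four implementations above agree): in the matrix
# below the zero at (0, 0) rules out any 3x3 block of ones, so the largest
# all-ones square has side 2 -- note the functions return the side length,
# despite the "area" in their names.
#   largest_square_area_in_matrix_bottom_up(3, 3, [[0, 1, 1], [1, 1, 1], [1, 1, 1]]) == 2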
| 640
| 1
|
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Optional[Any] = "mock-s3-bucket"
lowercase_ : Any = F"""s3://{mock_bucket}"""
lowercase_ : int = extract_path_from_uri(_UpperCamelCase )
assert dataset_path.startswith("s3://" ) is False
lowercase_ : str = "./local/path"
lowercase_ : int = extract_path_from_uri(_UpperCamelCase )
assert dataset_path == new_dataset_path
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = is_remote_filesystem(_UpperCamelCase )
assert is_remote is True
lowercase_ : str = fsspec.filesystem("file" )
lowercase_ : List[Any] = is_remote_filesystem(_UpperCamelCase )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
lowercase_ : Any = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase_ : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCamelCase )
lowercase_ : List[str] = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Optional[Any] = os.path.basename(_UpperCamelCase )
lowercase_ : str = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(_UpperCamelCase , "r" , encoding="utf-8" ) as f, open(_UpperCamelCase , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
lowercase_ : str = compressed_file_paths[protocol]
lowercase_ : Any = "dataset.jsonl"
lowercase_ : Dict = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase_ , *lowercase_ : List[Any] = fsspec.get_fs_token_paths(_UpperCamelCase )
assert fs.isfile(_UpperCamelCase )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : int = hf_api.dataset_info(_UpperCamelCase , token=_UpperCamelCase )
lowercase_ : Optional[Any] = HfFileSystem(repo_info=_UpperCamelCase , token=_UpperCamelCase )
assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
assert hffs.isdir("data" )
assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
with open(_UpperCamelCase ) as f:
assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : List[Any] = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCamelCase , _UpperCamelCase , clobber=_UpperCamelCase )
with pytest.warns(_UpperCamelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCamelCase ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
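# Example: the helper above (called is_apercent_close below) checks strictly-within-1%:
# (100.9, 100) -> True, while (101.0, 100) -> False since 1/100 is not < 0.01.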
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
lowercase_ : int = TestCommand(*_UpperCamelCase )
test_command.run()
lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
assert os.path.exists(_UpperCamelCase )
lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
lowercase_ : Optional[int] = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
if key == "num_bytes":
assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
elif key == "splits":
assert list(_UpperCamelCase ) == list(_UpperCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
assert result == expected
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
UpperCamelCase__ = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 48000,
'sample_size': 65536,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 48000,
'sample_size': 65536,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 48000,
'sample_size': 131072,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 16000,
'sample_size': 65536,
},
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return torch.atana(_UpperCamelCase , _UpperCamelCase ) / math.pi * 2
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = torch.sin(t * math.pi / 2 ) ** 2
lowercase_ : List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_UpperCamelCase , _UpperCamelCase )
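# Sketch of the schedule math above: for t in [0, 1] the "crash" schedule sets
# sigma = sin(pi * t / 2) ** 2 and alpha = sqrt(1 - sigma ** 2), then folds the pair
# back into a single timestep via atan2(sigma, alpha) * 2 / pi (the helper above).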
class _UpperCAmelCase ( snake_case ):
pass
class _UpperCAmelCase ( nn.Module ):
def __init__( self : Optional[int] , a : Any ):
'''simple docstring'''
super().__init__()
lowercase_ : Optional[Any] = DiffusionAttnUnetaD(a , n_attn_layers=4 )
lowercase_ : List[Any] = deepcopy(self.diffusion )
lowercase_ : int = torch.quasirandom.SobolEngine(1 , scramble=a )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = MODELS_MAP[model_name]["url"]
os.system(F"""wget {url} ./""" )
return F"""./{model_name}.ckpt"""
UpperCamelCase__ = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
UpperCamelCase__ = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
UpperCamelCase__ = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
UpperCamelCase__ = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
UpperCamelCase__ = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
UpperCamelCase__ = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of format main.{digit}
if not name.startswith("main." ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
for key, value in ATTN_MAP.items():
if name.startswith(_UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ):
return name.replace(_UpperCamelCase , _UpperCamelCase )
elif name.startswith(_UpperCamelCase ):
return [name.replace(_UpperCamelCase , _UpperCamelCase ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=13 ):
"""simple docstring"""
lowercase_ : List[Any] = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
lowercase_ : Optional[int] = 0
if string.startswith("net.3." ):
depth += 1
lowercase_ : int = string[6:]
elif string.startswith("net." ):
lowercase_ : Optional[Any] = string[4:]
while string.startswith("main.7." ):
depth += 1
lowercase_ : Optional[int] = string[7:]
if string.startswith("main." ):
lowercase_ : int = string[5:]
# mid block
if string[:2].isdigit():
lowercase_ : Any = string[:2]
lowercase_ : Dict = string[2:]
else:
lowercase_ : Optional[Any] = string[0]
lowercase_ : List[str] = string[1:]
if depth == max_depth:
lowercase_ : List[str] = MID_NUM_TO_LAYER[layer_num]
lowercase_ : Any = "mid_block"
elif depth > 0 and int(_UpperCamelCase ) < 7:
lowercase_ : Optional[int] = DOWN_NUM_TO_LAYER[layer_num]
lowercase_ : List[str] = F"""down_blocks.{depth}"""
elif depth > 0 and int(_UpperCamelCase ) > 7:
lowercase_ : str = UP_NUM_TO_LAYER[layer_num]
lowercase_ : Any = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
lowercase_ : int = DEPTH_0_TO_LAYER[layer_num]
lowercase_ : Optional[Any] = F"""up_blocks.{max_depth - 1}""" if int(_UpperCamelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
lowercase_ : Tuple = string_left[1:]
if "resnets" in new_layer:
lowercase_ : Tuple = convert_resconv_naming(_UpperCamelCase )
elif "attentions" in new_layer:
lowercase_ : Union[str, Any] = convert_attn_naming(_UpperCamelCase )
lowercase_ : Tuple = new_string_left
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
lowercase_ : Optional[int] = prefix + "." + new_layer + "." + string_left
else:
lowercase_ : Any = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
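# Illustrative key mappings (hypothetical checkpoint keys; exact outputs depend on
# the MID/DOWN/UP/DEPTH_0 tables above and on the max_depth argument):
#   "timestep_embed.weight"           -> "time_proj.weight"
#   "net.3.main.7.1.main.0.weight"    -> "down_blocks.2.resnets.0.conv_1.weight"
#   "net.3.main.7.9.qkv_proj.weight"  -> one key each for the query/key/value projections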
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers don't have trainable weights
continue
lowercase_ : List[str] = rename(_UpperCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_UpperCamelCase , _UpperCamelCase ):
lowercase_ : List[str] = transform_conv_attns(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
lowercase_ : Tuple = v
return new_state_dict
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if len(_UpperCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
lowercase_ : List[Any] = v[:, :, 0]
else:
# bias
lowercase_ : Any = v
else:
# qkv matrices
lowercase_ : str = v.shape[0]
lowercase_ : Optional[int] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
lowercase_ : Any = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
lowercase_ : List[str] = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
lowercase_ : List[Any] = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
lowercase_ : Optional[Any] = download(_UpperCamelCase )
lowercase_ : Any = MODELS_MAP[model_name]["sample_rate"]
lowercase_ : List[str] = MODELS_MAP[model_name]["sample_size"]
lowercase_ : str = Object()
lowercase_ : List[str] = sample_size
lowercase_ : List[Any] = sample_rate
lowercase_ : Any = 0
lowercase_ : Any = UNetaDModel(sample_size=_UpperCamelCase , sample_rate=_UpperCamelCase )
lowercase_ : Any = diffusers_model.state_dict()
lowercase_ : int = DiffusionUncond(_UpperCamelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_UpperCamelCase )["state_dict"] )
lowercase_ : Dict = orig_model.diffusion_ema.eval()
lowercase_ : Dict = orig_model.state_dict()
lowercase_ : str = rename_orig_weights(_UpperCamelCase )
lowercase_ : int = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
lowercase_ : int = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_UpperCamelCase ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith("kernel" ) for k in list(_UpperCamelCase ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
lowercase_ : int = value.squeeze()
lowercase_ : List[Any] = value
diffusers_model.load_state_dict(_UpperCamelCase )
lowercase_ : int = 100
lowercase_ : List[Any] = 33
lowercase_ : Union[str, Any] = IPNDMScheduler(num_train_timesteps=_UpperCamelCase )
lowercase_ : List[Any] = torch.manual_seed(_UpperCamelCase )
lowercase_ : Any = torch.randn([1, 2, config.sample_size] , generator=_UpperCamelCase ).to(_UpperCamelCase )
lowercase_ : int = torch.linspace(1 , 0 , steps + 1 , device=_UpperCamelCase )[:-1]
lowercase_ : int = get_crash_schedule(_UpperCamelCase )
lowercase_ : str = DanceDiffusionPipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
lowercase_ : Optional[Any] = torch.manual_seed(33 )
lowercase_ : List[Any] = pipe(num_inference_steps=_UpperCamelCase , generator=_UpperCamelCase ).audios
lowercase_ : int = sampling.iplms_sample(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , {} )
lowercase_ : Any = generated.clamp(-1 , 1 )
lowercase_ : Tuple = (generated - audio).abs().sum()
lowercase_ : Tuple = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , _UpperCamelCase )
print("Diff max" , _UpperCamelCase )
assert diff_max < 1e-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
UpperCamelCase__ = parser.parse_args()
main(args)
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
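# Example (hypothetical): create_inputs(["text", "image"]) returns
# ["Text input", <512x512 PIL.Image>]; an "audio" entry becomes torch.ones(3000).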
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[str] = create_inputs(self.tool.inputs )
lowercase_ : List[str] = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = create_inputs(self.tool.inputs )
lowercase_ : str = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : Optional[int] = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Any = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : Any = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCamelCase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Optional[Any] = ['pixel_values']
def __init__( self : int , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BICUBIC , a : bool = True , a : Dict[str, int] = None , a : bool = True , a : Union[int, float] = 1 / 2_5_5 , a : bool = True , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : bool = True , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**a )
lowercase_ : Tuple = size if size is not None else {"shortest_edge": 2_2_4}
lowercase_ : Tuple = get_size_dict(a , default_to_square=a )
lowercase_ : Any = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
lowercase_ : Any = get_size_dict(a , default_to_square=a , param_name="crop_size" )
lowercase_ : int = do_resize
lowercase_ : List[str] = size
lowercase_ : Any = resample
lowercase_ : List[Any] = do_center_crop
lowercase_ : int = crop_size
lowercase_ : Any = do_rescale
lowercase_ : str = rescale_factor
lowercase_ : Optional[int] = do_normalize
lowercase_ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase_ : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase_ : Optional[Any] = do_convert_rgb
def lowerCAmelCase__ ( self : Optional[Any] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BICUBIC , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ):
'''simple docstring'''
lowercase_ : Optional[int] = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase_ : Union[str, Any] = get_resize_output_image_size(a , size=size["shortest_edge"] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def lowerCAmelCase__ ( self : Optional[Any] , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , ):
'''simple docstring'''
lowercase_ : str = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(a , size=(size["height"], size["width"]) , data_format=a , **a )
def lowerCAmelCase__ ( self : Dict , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : int , ):
'''simple docstring'''
return rescale(a , scale=a , data_format=a , **a )
def lowerCAmelCase__ ( self : Optional[int] , a : np.ndarray , a : Union[float, List[float]] , a : Union[float, List[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : int , ):
'''simple docstring'''
return normalize(a , mean=a , std=a , data_format=a , **a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : int = None , a : bool = None , a : float = None , a : bool = None , a : Optional[Union[float, List[float]]] = None , a : Optional[Union[float, List[float]]] = None , a : bool = None , a : Optional[Union[str, TensorType]] = None , a : Optional[ChannelDimension] = ChannelDimension.FIRST , **a : Any , ):
'''simple docstring'''
lowercase_ : Dict = do_resize if do_resize is not None else self.do_resize
lowercase_ : Dict = size if size is not None else self.size
lowercase_ : Any = get_size_dict(a , param_name="size" , default_to_square=a )
lowercase_ : List[str] = resample if resample is not None else self.resample
lowercase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase_ : Dict = get_size_dict(a , param_name="crop_size" , default_to_square=a )
lowercase_ : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : str = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowercase_ : Tuple = image_std if image_std is not None else self.image_std
lowercase_ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase_ : str = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase_ : Tuple = [convert_to_rgb(a ) for image in images]
# All transformations expect numpy arrays.
lowercase_ : Tuple = [to_numpy_array(a ) for image in images]
if do_resize:
lowercase_ : Dict = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
lowercase_ : Union[str, Any] = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
lowercase_ : Dict = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
lowercase_ : Optional[Any] = [self.normalize(image=a , mean=a , std=a ) for image in images]
lowercase_ : Union[str, Any] = [to_channel_dimension_format(a , a ) for image in images]
lowercase_ : List[str] = {"pixel_values": images}
return BatchFeature(data=a , tensor_type=a )
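# --- Illustrative usage sketch (not part of this module; inputs are hypothetical) ---
# processor = <this image processor class>()          # defaults: resize + center-crop to 224
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape                         # -> torch.Size([1, 3, 224, 224])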
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
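# Example (hypothetical values): with the default scale_factor of 8 the helper above
# maps pixel dimensions to latent dimensions, rounding up to the next multiple of
# scale_factor**2 first: (768, 768) -> (96, 96) and (700, 700) -> (88, 88).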
class _UpperCAmelCase ( snake_case ):
def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
'''simple docstring'''
if latents is None:
lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : str = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[str] = 'xlm'
__lowerCamelCase: Any = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
def __init__( self : Any , a : str=3_0_1_4_5 , a : List[str]=2_0_4_8 , a : int=1_2 , a : int=1_6 , a : Optional[int]=0.1 , a : Optional[Any]=0.1 , a : List[Any]=True , a : Optional[Any]=False , a : Dict=False , a : Union[str, Any]=False , a : List[str]=1 , a : Any=True , a : List[Any]=5_1_2 , a : Union[str, Any]=2_0_4_8**-0.5 , a : str=1e-12 , a : str=0.02 , a : Optional[int]=0 , a : int=1 , a : Optional[int]=2 , a : int=3 , a : str=5 , a : List[Any]=True , a : Tuple="first" , a : List[Any]=True , a : str=None , a : int=True , a : Dict=0.1 , a : Any=5 , a : Tuple=5 , a : Dict=0 , a : List[str]=0 , a : Union[str, Any]=2 , a : str=0 , **a : Optional[Any] , ):
'''simple docstring'''
lowercase_ : Optional[int] = vocab_size
lowercase_ : str = emb_dim
lowercase_ : Any = n_layers
lowercase_ : Tuple = n_heads
lowercase_ : Union[str, Any] = dropout
lowercase_ : Optional[Any] = attention_dropout
lowercase_ : Union[str, Any] = gelu_activation
lowercase_ : Dict = sinusoidal_embeddings
lowercase_ : List[Any] = causal
lowercase_ : Dict = asm
lowercase_ : List[str] = n_langs
lowercase_ : int = use_lang_emb
lowercase_ : Union[str, Any] = layer_norm_eps
lowercase_ : List[Any] = bos_index
lowercase_ : Any = eos_index
lowercase_ : int = pad_index
lowercase_ : Dict = unk_index
lowercase_ : List[str] = mask_index
lowercase_ : Union[str, Any] = is_encoder
lowercase_ : Any = max_position_embeddings
lowercase_ : int = embed_init_std
lowercase_ : List[str] = init_std
lowercase_ : int = summary_type
lowercase_ : List[Any] = summary_use_proj
lowercase_ : Optional[Any] = summary_activation
lowercase_ : Dict = summary_proj_to_labels
lowercase_ : Any = summary_first_dropout
lowercase_ : Union[str, Any] = start_n_top
lowercase_ : List[Any] = end_n_top
lowercase_ : Optional[int] = mask_token_id
lowercase_ : List[Any] = lang_id
if "n_words" in kwargs:
lowercase_ : str = kwargs["n_words"]
super().__init__(pad_token_id=a , bos_token_id=a , **a )
class _UpperCAmelCase ( snake_case ):
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase_ : List[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase_ : Optional[int] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=512 , _UpperCamelCase=512 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : str = np.array(pil_image.convert("RGB" ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
lowercase_ : int = np.transpose(_UpperCamelCase , [2, 0, 1] )
lowercase_ : str = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
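# Example (hypothetical input): prepare_image(pil_image) resizes to 512x512, rescales
# pixel values to [-1, 1], and returns a float tensor of shape (1, 3, 512, 512).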
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
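# Example (hypothetical values): with num_inference_steps=100 and strength=0.2 the
# method above yields init_timestep=20 and t_start=80, so only the final 20
# scheduler timesteps are actually run.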
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(i , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(i ) for i in image]}. Currently, only PIL images and PyTorch tensors are supported""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
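# Worked example: odd_even_sort([5, 3, 8, 1])
#   pass 1: even-index swaps -> [3, 5, 1, 8], odd-index swap -> [3, 1, 5, 8]
#   pass 2: even-index swap  -> [1, 3, 5, 8], odd indices: no swaps
#   pass 3: no swaps in either phase, so the loop terminates -> [1, 3, 5, 8]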
if __name__ == "__main__":
print('Enter list to be sorted')
UpperCamelCase__ = [int(x) for x in input().split()]
# inputting elements of the list in one line
UpperCamelCase__ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
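# --- Illustrative usage sketch (hypothetical checkpoint and inputs) ---
# processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
# inputs = processor(images=pil_image, text="A picture of", return_tensors="pt")
# For non-VQA models the call above tokenizes the text, pops "input_ids" /
# "attention_mask", and merges them into the image-processor output.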
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'linear'
__lowerCamelCase: Any = 'cosine'
__lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
__lowerCamelCase: Tuple = 'polynomial'
__lowerCamelCase: int = 'constant'
__lowerCamelCase: Optional[Any] = 'constant_with_warmup'
__lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Dict = step_rules.split("," )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ : Any = rule_str.split(":" )
lowercase_ : List[Any] = int(_UpperCamelCase )
lowercase_ : int = float(_UpperCamelCase )
lowercase_ : Optional[int] = value
lowercase_ : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase , _UpperCamelCase ):
def rule_func(_UpperCamelCase ) -> float:
lowercase_ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowercase_ : Optional[int] = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
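# A minimal, self-contained usage sketch (assumes only that torch is installed;
# every name below is illustrative, not part of this module): the factories
# above all return a LambdaLR whose lambda scales the optimizer's base LR, so a
# linear warmup + linear decay schedule reduces to a plain closure.
if __name__ == "__main__":
    import torch

    demo_model = torch.nn.Linear(4, 2)
    demo_optimizer = torch.optim.AdamW(demo_model.parameters(), lr=1e-3)
    warmup_steps, training_steps = 10, 100

    def demo_linear_with_warmup(step: int) -> float:
        # Ramp linearly from 0 to 1 during warmup, then decay linearly to 0.
        if step < warmup_steps:
            return step / max(1, warmup_steps)
        return max(0.0, (training_steps - step) / max(1, training_steps - warmup_steps))

    demo_scheduler = torch.optim.lr_scheduler.LambdaLR(demo_optimizer, demo_linear_with_warmup)
    for _ in range(training_steps):
        demo_optimizer.step()
        demo_scheduler.step()
    print(demo_scheduler.get_last_lr())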
| 640
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 1_0_0
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowercase_ : Union[str, Any] = PriorTransformer(**a )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_image_encoder
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Any = self.dummy_tokenizer
lowercase_ : Optional[Any] = self.dummy_image_processor
lowercase_ : List[str] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
lowercase_ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
'''simple docstring'''
if str(a ).startswith("mps" ):
lowercase_ : int = torch.manual_seed(a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = "cpu"
lowercase_ : Any = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**a )
lowercase_ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
lowercase_ : List[Any] = output.image_embeds
lowercase_ : str = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase_ : Any = image[0, -1_0:]
lowercase_ : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowercase_ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = torch_device == "cpu"
lowercase_ : Tuple = True
lowercase_ : str = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = torch_device == "cpu"
lowercase_ : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 1
|
'''simple docstring'''
UpperCamelCase__ = [
(1000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
lowercase_ : str = 0
lowercase_ : List[str] = 0
while place < len(_UpperCamelCase ):
if (place + 1 < len(_UpperCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = []
for arabic, roman in ROMAN:
((lowercase_) , (lowercase_)) : Union[str, Any] = divmod(_UpperCamelCase , _UpperCamelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
|
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__ = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
UpperCamelCase__ = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = []
lowercase_ : List[str] = len(_UpperCamelCase )
for i in range(_UpperCamelCase ):
lowercase_ : float = -1
for j in range(i + 1 , _UpperCamelCase ):
if arr[i] < arr[j]:
lowercase_ : Union[str, Any] = arr[j]
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = []
for i, outer in enumerate(_UpperCamelCase ):
lowercase_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
lowercase_ : Optional[Any] = inner
break
result.append(_UpperCamelCase )
return result
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = len(_UpperCamelCase )
lowercase_ : list[float] = []
lowercase_ : list[float] = [-1] * arr_size
for index in reversed(range(_UpperCamelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
lowercase_ : Optional[Any] = stack[-1]
stack.append(arr[index] )
return result
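# Worked example for all three variants above, assuming arr = [2, 7, 3, 5, 1]:
# the next strictly greater element to the right of each entry is
# [7, -1, 5, -1, -1]. The stack version achieves O(n) because every element is
# pushed and popped at most once; a value popped as "not greater" can never be
# the answer for an earlier index again.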
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
UpperCamelCase__ = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 640
| 1
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
UpperCamelCase__ = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCamelCase__ = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = SavedModel()
lowercase_ : Union[str, Any] = []
with open(os.path.join(_UpperCamelCase , "utils" , "tf_ops" , "onnx.json" ) ) as f:
lowercase_ : int = json.load(_UpperCamelCase )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_UpperCamelCase )] )
with open(_UpperCamelCase , "rb" ) as f:
saved_model.ParseFromString(f.read() )
lowercase_ : Optional[int] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
lowercase_ : Optional[int] = sorted(_UpperCamelCase )
lowercase_ : str = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_UpperCamelCase )
if strict and len(_UpperCamelCase ) > 0:
raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + incompatible_ops )
elif len(_UpperCamelCase ) > 0:
print(F"""Found the following incompatible ops for the opset {opset}:""" )
print(*_UpperCamelCase , sep="\n" )
else:
print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=12, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether to make the check strict (raise errors) or just print warnings'
)
UpperCamelCase__ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 640
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'gpt_neox_japanese'
def __init__( self : List[str] , a : List[Any]=3_2_0_0_0 , a : Union[str, Any]=2_5_6_0 , a : Optional[Any]=3_2 , a : Any=3_2 , a : str=4 , a : Optional[int]="gelu" , a : Optional[Any]=1.00 , a : Dict=1_0_0_0_0 , a : List[Any]=2_0_4_8 , a : Dict=0.02 , a : int=1e-5 , a : Optional[int]=True , a : Union[str, Any]=3_1_9_9_6 , a : List[Any]=3_1_9_9_9 , a : List[str]=0.1 , a : Dict=0.0 , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=a , eos_token_id=a , **a )
lowercase_ : int = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : List[Any] = intermediate_multiple_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[int] = rotary_pct
lowercase_ : Tuple = rotary_emb_base
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : List[str] = use_cache
lowercase_ : Any = attention_dropout
lowercase_ : List[Any] = hidden_dropout
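# Usage sketch (in the published transformers API this class is named
# GPTNeoXJapaneseConfig): instantiating it with no arguments reproduces the
# abeja/gpt-neox-japanese-2.7b architecture encoded in the defaults above,
# i.e. a 2560-wide model with 32 hidden layers and 32 attention heads.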
| 640
| 1
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
UpperCamelCase__ = get_tests_dir('fixtures')
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = mock.Mock()
lowercase_ : Optional[int] = 5_0_0
lowercase_ : Union[str, Any] = {}
lowercase_ : Optional[int] = HTTPError
lowercase_ : Dict = {}
# Download this model to make sure it's in the cache.
lowercase_ : Optional[Any] = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=a ) as mock_head:
lowercase_ : Any = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This checks that we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
with self.assertRaises(a ):
# config is in subfolder, the following should not work without specifying the subfolder
lowercase_ : int = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
lowercase_ : Optional[Any] = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(a )
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = TOKEN
HfFolder.save_token(a )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : int = ViTImageProcessor.from_pretrained(a )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
lowercase_ : Optional[int] = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
a , repo_id="test-image-processor" , push_to_hub=a , use_auth_token=self._token )
lowercase_ : str = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a , getattr(a , a ) )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = ViTImageProcessor.from_pretrained(a )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
lowercase_ : Any = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
a , repo_id="valid_org/test-image-processor-org" , push_to_hub=a , use_auth_token=self._token )
lowercase_ : Dict = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(a , getattr(a , a ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
lowercase_ : List[str] = CustomImageProcessor.from_pretrained(a )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
lowercase_ : Union[str, Any] = AutoImageProcessor.from_pretrained(
f"""{USER}/test-dynamic-image-processor""" , trust_remote_code=a )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 640
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowercase_ : List[Any] = str(id_ )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : Optional[Any] , a : int ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return self.id
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
self.neighbors.append(a )
def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
'''simple docstring'''
lowercase_ : int = weight
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = []
for u in graph:
lowercase_ : List[Any] = math.inf
lowercase_ : str = None
lowercase_ : Tuple = 0
lowercase_ : Tuple = graph[:]
while q:
lowercase_ : List[Any] = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase_ : Optional[int] = u
lowercase_ : Union[str, Any] = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for u in graph:
lowercase_ : str = math.inf
lowercase_ : int = None
lowercase_ : List[Any] = 0
lowercase_ : str = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
lowercase_ : List[Any] = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase_ : str = u
lowercase_ : Optional[int] = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 1
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : Tuple = np.shape(_UpperCamelCase )
if rows != columns:
lowercase_ : List[str] = (
"'table' has to be of square shaped array but got a "
F"""{rows}x{columns} array:\n{table}"""
)
raise ValueError(_UpperCamelCase )
lowercase_ : Dict = np.zeros((rows, columns) )
lowercase_ : Any = np.zeros((rows, columns) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
lowercase_ : List[str] = sum(lower[i][k] * upper[k][j] for k in range(_UpperCamelCase ) )
if upper[j][j] == 0:
raise ArithmeticError("No LU decomposition exists" )
lowercase_ : str = (table[i][j] - total) / upper[j][j]
lowercase_ : Tuple = 1
for j in range(_UpperCamelCase , _UpperCamelCase ):
lowercase_ : int = sum(lower[i][k] * upper[k][j] for k in range(_UpperCamelCase ) )
lowercase_ : Optional[int] = table[i][j] - total
return lower, upper
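# Worked example for the Doolittle factorization above: for
# table = [[4, 3], [6, 3]] it returns the unit-lower and upper factors
# L = [[1, 0], [1.5, 1]] and U = [[4, 3], [0, -1.5]], and multiplying back
# gives L @ U = [[4, 3], [6, 3]] as required.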
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
while is_sorted is False: # Until all the indices are traversed keep looping
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
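# Worked example, assuming input_list = [3, 2, 1]:
#   pass 1: the even phase swaps (3, 2) -> [2, 3, 1]; the odd phase swaps (3, 1) -> [2, 1, 3]
#   pass 2: the even phase swaps (2, 1) -> [1, 2, 3]; the odd phase finds nothing to swap
#   pass 3: no swaps in either phase, so is_sorted stays True and the loop terminates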
if __name__ == "__main__":
print('Enter list to be sorted')
UpperCamelCase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCamelCase__ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 640
| 1
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
def dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
dfs(_UpperCamelCase , -1 , _UpperCamelCase , id_ )
return bridges
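# Worked example, assuming the first test graph above (index 0): the triangle
# 0-1-2 and the cycle 5-6-7-8 contain no bridges, so the algorithm returns
# exactly the edges whose removal disconnects the graph: (2, 3), (3, 4)
# and (2, 5).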
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 1
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Union[str, Any] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
lowercase_ : Optional[Any] = s_dict.pop(_UpperCamelCase )
elif "subsample" in key:
lowercase_ : List[str] = s_dict.pop(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : Any = emb.weight.shape
lowercase_ : List[str] = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
lowercase_ : Union[str, Any] = emb.weight.data
return lin_layer
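# Weight tying: the returned Linear's weight is pointed at the embedding matrix
# of shape (vocab_size, d_model), so applying it to decoder hidden states
# computes hidden_states @ emb.weight.T, i.e. the LM logits, without a
# separately trained output projection.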
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = torch.load(_UpperCamelCase , map_location="cpu" )
lowercase_ : List[Any] = mam_aaa["args"]
lowercase_ : Any = mam_aaa["model"]
lowercase_ : Union[str, Any] = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(_UpperCamelCase )
rename_keys(_UpperCamelCase )
lowercase_ : Union[str, Any] = state_dict["decoder.embed_tokens.weight"].shape[0]
lowercase_ : Tuple = args.share_decoder_input_output_embed
lowercase_ : Any = [int(_UpperCamelCase ) for i in args.conv_kernel_sizes.split("," )]
lowercase_ : Dict = SpeechaTextConfig(
vocab_size=_UpperCamelCase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(_UpperCamelCase ) , conv_channels=args.conv_channels , conv_kernel_sizes=_UpperCamelCase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=_UpperCamelCase , num_beams=5 , max_length=200 , use_cache=_UpperCamelCase , decoder_start_token_id=2 , early_stopping=_UpperCamelCase , )
lowercase_ : int = SpeechaTextForConditionalGeneration(_UpperCamelCase )
lowercase_ , lowercase_ : Union[str, Any] = model.model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
if len(_UpperCamelCase ) > 0 and not set(_UpperCamelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
lowercase_ : Tuple = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
lowercase_ : Any = lm_head_weights
model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
UpperCamelCase__ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 640
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = 1
__lowerCamelCase: List[Any] = 2
__lowerCamelCase: Optional[Any] = 3
__lowerCamelCase: int = 4
__lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
__lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
__lowerCamelCase: Optional[int] = ['dtype']
__lowerCamelCase: int = []
__lowerCamelCase: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = cls.load_config(
pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
lowercase_ : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
'''simple docstring'''
self.save_config(save_directory=a , push_to_hub=a , **a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
lowercase_ : Optional[Any] = [
getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
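# Example: lifting a per-sample scalar of shape (batch,) onto samples of shape
# (batch, c, h, w): the reshape appends trailing singleton axes so that
# jnp.broadcast_to can expand along them while the leading axes stay aligned.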
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
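# This is the squared-cosine alpha_bar schedule from the improved-DDPM paper:
# each beta is 1 - alpha_bar(t2) / alpha_bar(t1) over adjacent normalized
# timesteps, clipped to max_beta, so the betas start near zero and grow as the
# diffusion process approaches t = 1.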
@flax.struct.dataclass
class _UpperCAmelCase :
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = scheduler.config
if config.trained_betas is not None:
lowercase_ : Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase_ : List[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Union[str, Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
lowercase_ : str = 1.0 - betas
lowercase_ : Dict = jnp.cumprod(a , axis=0 )
return cls(
alphas=a , betas=a , alphas_cumprod=a , )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = state.alphas_cumprod
lowercase_ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowercase_ : int = sqrt_alpha_prod.flatten()
lowercase_ : Union[str, Any] = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
lowercase_ : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten()
lowercase_ : Any = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 640
| 1
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. A bigger population can converge faster but uses more memory.
UpperCamelCase__ = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase__ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase__ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = len([g for position, g in enumerate(_UpperCamelCase ) if g == main_target[position]] )
return (item, float(_UpperCamelCase ))
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Union[str, Any] = random.randint(0 , len(_UpperCamelCase ) - 1 )
lowercase_ : Any = parent_a[:random_slice] + parent_a[random_slice:]
lowercase_ : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = list(_UpperCamelCase )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
lowercase_ : Optional[Any] = random.choice(_UpperCamelCase )
return "".join(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
"""simple docstring"""
lowercase_ : Union[str, Any] = []
# Generate more children proportionally to the fitness score.
lowercase_ : str = int(parent_a[1] * 100 ) + 1
lowercase_ : Dict = 10 if child_n >= 10 else child_n
for _ in range(_UpperCamelCase ):
lowercase_ : int = population_score[random.randint(0 , _UpperCamelCase )][0]
lowercase_ , lowercase_ : Dict = crossover(parent_a[0] , _UpperCamelCase )
# Append new string to the population list.
pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) )
pop.append(mutate(_UpperCamelCase , _UpperCamelCase ) )
return pop
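# Worked sketch of one call, assuming a normalized parent score of 1.0: then
# child_n = int(1.0 * 100) + 1 = 101, capped at 10, so the loop runs 10 times
# and each iteration appends two (possibly mutated) crossover children,
# i.e. 20 new strings per selected parent.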
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = True ):
"""simple docstring"""
if N_POPULATION < N_SELECTED:
lowercase_ : Optional[Any] = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_UpperCamelCase )
# Verify that the target contains no genes besides the ones in the genes variable.
lowercase_ : Union[str, Any] = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
lowercase_ : Optional[Any] = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_UpperCamelCase )
# Generate random starting population.
lowercase_ : Dict = []
for _ in range(_UpperCamelCase ):
population.append("".join([random.choice(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) )] ) )
# Just some logs to know what the algorithms is doing.
lowercase_ , lowercase_ : Optional[int] = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
total_population += len(_UpperCamelCase )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
lowercase_ : List[Any] = [evaluate(_UpperCamelCase , _UpperCamelCase ) for item in population]
# Check if there is a matching evolution.
lowercase_ : Union[str, Any] = sorted(_UpperCamelCase , key=lambda _UpperCamelCase : x[1] , reverse=_UpperCamelCase )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
lowercase_ : Union[str, Any] = population[: int(N_POPULATION / 3 )]
population.clear()
population.extend(_UpperCamelCase )
# Normalize population score to be between 0 and 1.
lowercase_ : Union[str, Any] = [
(item, score / len(_UpperCamelCase )) for item, score in population_score
]
# This is selection
for i in range(_UpperCamelCase ):
population.extend(select(population_score[int(_UpperCamelCase )] , _UpperCamelCase , _UpperCamelCase ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# far fewer generations.
if len(_UpperCamelCase ) > N_POPULATION:
break
if __name__ == "__main__":
UpperCamelCase__ = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
UpperCamelCase__ = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 640
|
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : list[list] = []
# for each node and its adjacency list, add them together with the node's rank to the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min-priority queue, so -1 * len(v) is used to simulate a max-priority queue
for key, value in graph.items():
# O(log(n))
heapq.heappush(_UpperCamelCase , [-1 * len(_UpperCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase_ : Optional[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase_ : Any = heapq.heappop(_UpperCamelCase )[1][0]
chosen_vertices.add(_UpperCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v has no adjacent nodes, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
lowercase_ : str = elem[1][1].index(_UpperCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_UpperCamelCase )
return chosen_vertices
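# Worked example for the graph used below: the greedy repeatedly extracts the
# vertex of highest remaining degree (vertex 2 first, then 0, 1 and 4 after
# ties), deleting covered arcs as it goes, and returns the cover {0, 1, 2, 4}.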
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
| 1
|
'''simple docstring'''
from numpy import exp, pi, sqrt
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = 0.0 , _UpperCamelCase = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
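# Worked value, assuming the standard normal (mu = 0, sigma = 1): at x = 0 the
# density is 1 / sqrt(2 * pi) ~= 0.3989, and it decays symmetrically as
# |x - mu| grows, with about 68% of the mass inside one sigma.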
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
|
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 640
| 1
|
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Union[str, Any] = int(_UpperCamelCase )
assert noofclusters < len(_UpperCamelCase )
# Find out the dimensionality
lowercase_ : Optional[Any] = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase_ : Union[str, Any] = list(range(len(_UpperCamelCase ) ) )
shuffle(_UpperCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase_ : str = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase_ : List[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase_ : Any = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_UpperCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase_ : Dict = tf.placeholder("float64" , [dim] )
lowercase_ : int = []
for centroid in centroids:
cent_assigns.append(tf.assign(_UpperCamelCase , _UpperCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase_ : Dict = [tf.Variable(0 ) for i in range(len(_UpperCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase_ : Dict = tf.placeholder("int32" )
lowercase_ : Union[str, Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_UpperCamelCase , _UpperCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase_ : Any = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase_ : Tuple = tf.reduce_mean(_UpperCamelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase_ : Any = tf.placeholder("float" , [dim] )
lowercase_ : Optional[int] = tf.placeholder("float" , [dim] )
lowercase_ : str = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(_UpperCamelCase , _UpperCamelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase_ : str = tf.placeholder("float" , [noofclusters] )
lowercase_ : Dict = tf.argmin(_UpperCamelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase_ : Union[str, Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_UpperCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase_ : str = 100
for _ in range(_UpperCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_UpperCamelCase ) ):
lowercase_ : Union[str, Any] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase_ : Dict = [
sess.run(_UpperCamelCase , feed_dict={va: vect, va: sess.run(_UpperCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase_ : int = sess.run(
_UpperCamelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_UpperCamelCase ):
# Collect all the vectors assigned to this cluster
lowercase_ : List[Any] = [
vectors[i]
for i in range(len(_UpperCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase_ : Dict = sess.run(
_UpperCamelCase , feed_dict={mean_input: array(_UpperCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase_ : Union[str, Any] = sess.run(_UpperCamelCase )
lowercase_ : Union[str, Any] = sess.run(_UpperCamelCase )
return centroids, assignments
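# Usage sketch, assuming a TensorFlow 1.x runtime (tf.Session and
# tf.placeholder are 1.x-only APIs): pass the function above a list of
# equal-length float vectors, e.g. [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0]], and a
# cluster count such as 2; it returns the centroid coordinates and a
# per-vector cluster index after 100 expectation-maximization iterations.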
| 640
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Tuple = XLNetConfig.from_json_file(_UpperCamelCase )
lowercase_ : Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowercase_ : Dict = finetuning_task
lowercase_ : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase_ : Any = XLNetForSequenceClassification(_UpperCamelCase )
elif "squad" in finetuning_task:
lowercase_ : Optional[int] = finetuning_task
lowercase_ : Optional[int] = XLNetForQuestionAnswering(_UpperCamelCase )
else:
lowercase_ : Union[str, Any] = XLNetLMHeadModel(_UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
lowercase_ : Optional[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCamelCase )}""" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCamelCase )}""" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 640
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
UpperCamelCase__ = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
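# With the lazy structure above, importing this package is cheap: submodules
# such as the torch or flax model files are only imported the first time one
# of their attributes (e.g. a model class) is actually accessed.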
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
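# The bit trick above works because a power of two has exactly one set bit, so
# n & (n - 1) clears it to zero: 8 & 7 == 0b1000 & 0b0111 == 0, while
# 6 & 5 == 0b110 & 0b101 == 0b100 != 0. Note that 0 & -1 == 0, so zero is
# reported as a power of two here.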
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
| 1
|
'''simple docstring'''
from copy import deepcopy
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , a : list[int] | None = None , a : int | None = None ):
'''simple docstring'''
if arr is None and size is not None:
lowercase_ : Dict = size
lowercase_ : List[str] = [0] * size
elif arr is not None:
self.init(a )
else:
raise ValueError("Either arr or size must be specified" )
def lowerCAmelCase__ ( self : Optional[int] , a : list[int] ):
'''simple docstring'''
lowercase_ : int = len(a )
lowercase_ : List[Any] = deepcopy(a )
for i in range(1 , self.size ):
lowercase_ : Union[str, Any] = self.next_(a )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : List[str] = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
lowercase_ : Dict = self.next_(a )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCAmelCase__ ( a : int ):
'''simple docstring'''
return index + (index & (-index))
@staticmethod
def lowerCAmelCase__ ( a : int ):
'''simple docstring'''
return index - (index & (-index))
def lowerCAmelCase__ ( self : List[str] , a : int , a : int ):
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowercase_ : Dict = self.next_(a )
def lowerCAmelCase__ ( self : List[str] , a : int , a : int ):
'''simple docstring'''
self.add(a , value - self.get(a ) )
def lowerCAmelCase__ ( self : str , a : int ):
'''simple docstring'''
if right == 0:
return 0
lowercase_ : Tuple = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowercase_ : Dict = self.prev(a )
return result
def lowerCAmelCase__ ( self : Tuple , a : int , a : int ):
'''simple docstring'''
return self.prefix(a ) - self.prefix(a )
def lowerCAmelCase__ ( self : Any , a : int ):
'''simple docstring'''
return self.query(a , index + 1 )
def lowerCAmelCase__ ( self : Tuple , a : int ):
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
lowercase_ : str = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowercase_ : str = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
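# Worked example of the Fenwick-tree operations above, assuming the tree is
# built from arr = [1, 2, 3, 4, 5]: a point update add(2, 10) makes the
# logical array [1, 2, 13, 4, 5]; prefix(3) then returns 1 + 2 + 13 = 16 and
# query(1, 4) returns 2 + 13 + 4 = 19. Every operation walks
# index +/- (index & -index), exactly what next_ and prev compute, so each
# runs in O(log n).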
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
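# (Added note) These are the fairseq ids of "__en_XX__" and "__python__" in the
# base vocabulary; they line up with the 50001-50003 id assertions further down.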
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])
lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : str = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"])
lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : List[Any] = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
    checkpoint_name = 'uclanlp/plbart-python-en_XX'
    src_text = [
        'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
        'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
    ]
    tgt_text = [
        'Returns the maximum value of a b c.',
        'Sums the values of a b c.',
    ]
    expected_src_tokens = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX")
        cls.pad_token_id = 1
        return cls
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertIn(a , self.tokenizer.all_special_ids )
lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , a )
lowercase_ : Tuple = 1_0
lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = tempfile.mkdtemp()
lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(a , a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
lowercase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
lowercase_ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
lowercase_ : Dict = targets["input_ids"]
lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
| 640
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the list of (old, new) key pairs mapping original DiT names onto BEiT names."""
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(F"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(F"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(F"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate query/key/value weights."""
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""")
        q_bias = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""")
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.query.bias"""] = q_bias
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""beit.encoder.layer.{i}.attention.attention.value.bias"""] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""")
        gamma_2 = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""")
        state_dict[F"""beit.encoder.layer.{i}.lambda_1"""] = gamma_1
        state_dict[F"""beit.encoder.layer.{i}.lambda_2"""] = gamma_2
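# Shape bookkeeping for the split above (added note): the fused qkv weight has
# shape (3 * hidden_size, hidden_size); the row blocks [0:h], [h:2h] and [2h:3h]
# become the query, key and value projections. The original checkpoint stores no
# key bias, which is why only q_bias and v_bias are popped.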
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Fetch the standard COCO image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original DiT weights into the BEiT structure."""
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowercase_ : Union[str, Any] = 1024
lowercase_ : Union[str, Any] = 4096
lowercase_ : List[str] = 24
lowercase_ : Optional[int] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowercase_ : Any = 16
lowercase_ : int = "huggingface/label-files"
lowercase_ : Union[str, Any] = "rvlcdip-id2label.json"
lowercase_ : int = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) )
lowercase_ : Tuple = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
lowercase_ : Dict = idalabel
lowercase_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
UpperCamelCase__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 640
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
| 1
|
'''simple docstring'''
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit of `num`."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('doctest').testmod()
| 640
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Constant learning rate schedule."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule parsed from rules like "100:1,200:0.1,0.01"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1):
    """Linear warmup followed by a linear decay to zero."""
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 ):
    """Linear warmup, then cosine decay from the initial lr down to 0."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
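# (Added note) With the default num_cycles = 0.5 the multiplier above reduces to
# 0.5 * (1 + cos(pi * progress)): it starts at 1 right after warmup
# (progress = 0) and decays smoothly to 0 at the end of training (progress = 1).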
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 ):
    """Cosine decay with `num_cycles` hard restarts after a linear warmup."""
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, lr_end: float = 1e-7, power: float = 1.0, last_epoch: int = -1 ):
    """Linear warmup, then a polynomial decay from the initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(F"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
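# Minimal usage sketch (added; assumes an existing torch optimizer):
#
#     scheduler = get_scheduler(
#         "linear", optimizer, num_warmup_steps=100, num_training_steps=1000
#     )
#     for step in range(1000):
#         ...                 # forward / backward / optimizer.step()
#         scheduler.step()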
| 640
| 1
|
'''simple docstring'''
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCamelCase__ = get_logger(__name__)
class _UpperCAmelCase :
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)
    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
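# Example of the matching above (added note): a gzip stream starts with the two
# bytes b"\x1f\x8b", so an extractor reads at most the longest registered magic
# number from the file head and checks startswith() against each known prefix.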
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)
    @staticmethod
    def safemembers(members, trusted_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(trusted_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x1F\x8B']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b'PK\x03\x04',
        b'PK\x05\x06',  # empty archive
        b'PK\x07\x08',  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(a , "rb" ) as fp:
lowercase_ : Optional[int] = _EndRecData(a )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowercase_ : int = fp.read(a ) # CD is where we expect it to be
if len(a ) == sizeCentralDir:
lowercase_ : Union[str, Any] = struct.unpack(a , a ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\xFD\x37\x7A\x58\x5A\x00']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'Rar!\x1a\x07\x00', b'Rar!\x1a\x07\x01\x00']  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x28\xB5\x2F\xFD']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class BzipaExtractor(MagicNumberBaseExtractor):  # extracts bz2; class name kept to match the registry below
    magic_numbers = [b'\x42\x5A\x68']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b'\x37\x7A\xBC\xAF\x27\x1C']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class LzaExtractor(MagicNumberBaseExtractor):  # extracts lz4; class name kept to match the registry below
    magic_numbers = [b'\x04\x22\x4D\x18']

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Keep the zip extractor last: other archive types can be wrongly detected as zip
    # (and zip archives themselves as tar or gzip).
    extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers)
    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.", category=FutureWarning, )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
@classmethod
def lowerCAmelCase__ ( cls : List[str] , a : Union[Path, str] , a : Union[Path, str] , a : Optional[str] = None , a : Optional[BaseExtractor] = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(a ) , exist_ok=a )
# Prevent parallel extractions
lowercase_ : int = str(Path(a ).with_suffix(".lock" ) )
with FileLock(a ):
shutil.rmtree(a , ignore_errors=a )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(a , a ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=a , )
lowercase_ : Union[str, Any] = extractor if extractor != "deprecated" else extractor_format
else:
lowercase_ : Any = cls.extractors[extractor_format]
return extractor.extract(a , a )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=a , )
for extractor in cls.extractors.values():
if extractor.is_extractable(a ):
return extractor.extract(a , a )
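# Minimal usage sketch (added; "archive.tar.gz" and "out_dir" are hypothetical
# paths):
#
#     fmt = Extractor.infer_extractor_format("archive.tar.gz")  # e.g. "gzip"
#     if fmt:
#         Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)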
| 640
|
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be made from the standard UK coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
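# Worked example (added): solution(5) == 4, counting {5}, {2, 2, 1},
# {2, 1, 1, 1} and {1, 1, 1, 1, 1}.
assert solution(5) == 4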
if __name__ == "__main__":
assert solution(200) == 73682
| 640
| 1
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
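        # Worked numbers for the defaults above (added note): (30 // 2) ** 2 = 225
        # patches, and ceil((1 - 0.6) * (225 + 1)) = 91 tokens survive masking
        # (the + 1 accounts for the [CLS] token).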
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase__ ( self : Tuple , a : Dict , a : Union[str, Any] , a : str ):
'''simple docstring'''
lowercase_ : List[Any] = ViTMAEModel(config=a )
model.to(a )
model.eval()
lowercase_ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self : Optional[Any] , a : Optional[int] , a : Union[str, Any] , a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = ViTMAEForPreTraining(a )
model.to(a )
model.eval()
lowercase_ : str = model(a )
lowercase_ : Optional[int] = (self.image_size // self.patch_size) ** 2
lowercase_ : Dict = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase_ : str = 1
lowercase_ : Any = ViTMAEForPreTraining(a )
model.to(a )
model.eval()
lowercase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Optional[int] = model(a )
lowercase_ : List[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(a )
lowercase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Tuple = [*signature.parameters.keys()]
lowercase_ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a )
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Tuple = ViTMAEModel.from_pretrained(a )
self.assertIsNotNone(a )
def prepare_img():
    """Load the local COCO fixture image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
np.random.seed(2 )
lowercase_ : Dict = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(a )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Union[str, Any] = prepare_img()
lowercase_ : Optional[int] = image_processor(images=a , return_tensors="pt" ).to(a )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase_ : Optional[int] = ViTMAEConfig()
lowercase_ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase_ : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowercase_ : int = model(**a , noise=torch.from_numpy(a ).to(device=a ) )
# verify the logits
        expected_shape = torch.Size((1, 1_9_6, 7_6_8))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 640
|
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowercase_ : str = field
lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
lowercase_ : Any = Json(
cache_dir=a , data_files=a , features=a , field=a , **a , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class _UpperCAmelCase :
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs, ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs, ) -> int:
        """Write the dataset to `file_obj` and return the number of bytes written."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
else:
            num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(a )
return written
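# Note on the batching above (added): rows are serialized in slices of
# `batch_size` through pandas' to_json; with num_proc > 1 the slices are fanned
# out to a multiprocessing.Pool and pool.imap streams the encoded chunks back in
# order, so the output file is identical to the single-process case.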
| 640
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 640
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion over every cell; exponential time, kept for reference."""
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized in `dp_array`; O(rows * cols) time."""
    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP table; O(rows * cols) time and space."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = [0] * (cols + 1)
lowercase_ : Union[str, Any] = [0] * (cols + 1)
lowercase_ : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Dict = current_row[col + 1]
lowercase_ : List[Any] = next_row[col + 1]
lowercase_ : Tuple = next_row[col]
if mat[row][col] == 1:
lowercase_ : Dict = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : int = max(current_row[col] , _UpperCamelCase )
else:
lowercase_ : Tuple = 0
        lowercase_ : Optional[Any] = current_row.copy()  # snapshot the finished row; a plain reference would alias current_row and corrupt the next pass
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 640
| 1
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
__lowerCamelCase: str
__lowerCamelCase: str = None
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
raise NotImplementedError
def lowerCAmelCase__ ( self : str , a : int , a : int , a : str , **a : Dict ):
'''simple docstring'''
raise NotImplementedError
def lowerCAmelCase__ ( self : Union[str, Any] , a : Dict ):
'''simple docstring'''
raise NotImplementedError
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
if not self.is_available():
raise RuntimeError(
f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] ):
'''simple docstring'''
return f"""`pip install {cls.pip_package or cls.name}`"""
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'optuna'
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
return is_optuna_available()
def lowerCAmelCase__ ( self : str , a : Any , a : int , a : str , **a : Dict ):
'''simple docstring'''
return run_hp_search_optuna(a , a , a , **a )
def lowerCAmelCase__ ( self : Optional[int] , a : Dict ):
'''simple docstring'''
return default_hp_space_optuna(a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Any = 'ray'
__lowerCamelCase: Optional[Any] = '\'ray[tune]\''
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
return is_ray_available()
def lowerCAmelCase__ ( self : List[str] , a : List[Any] , a : int , a : str , **a : List[str] ):
'''simple docstring'''
return run_hp_search_ray(a , a , a , **a )
def lowerCAmelCase__ ( self : Optional[int] , a : Tuple ):
'''simple docstring'''
return default_hp_space_ray(a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Dict = 'sigopt'
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
return is_sigopt_available()
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any] , a : int , a : str , **a : int ):
'''simple docstring'''
return run_hp_search_sigopt(a , a , a , **a )
def lowerCAmelCase__ ( self : List[str] , a : Tuple ):
'''simple docstring'''
return default_hp_space_sigopt(a )
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'wandb'
@staticmethod
def lowerCAmelCase__ ( ):
'''simple docstring'''
return is_wandb_available()
def lowerCAmelCase__ ( self : Dict , a : Optional[Any] , a : int , a : str , **a : Optional[Any] ):
'''simple docstring'''
return run_hp_search_wandb(a , a , a , **a )
def lowerCAmelCase__ ( self : List[Any] , a : Optional[Any] ):
'''simple docstring'''
return default_hp_space_wandb(a )
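# Registry mapping each HPSearchBackend value to its backend class.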
UpperCamelCase__ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : int = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(_UpperCamelCase ) > 0:
lowercase_ : List[str] = available_backends[0].name
if len(_UpperCamelCase ) > 1:
logger.info(
F"""{len(_UpperCamelCase )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
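# Usage sketch (hypothetical; assumes the selector above and the availability check keep
# their upstream names `default_hp_search_backend` and `ensure_available`):
#   name = default_hp_search_backend()  # raises RuntimeError if no backend is installed
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(name)]()
#   backend.ensure_available()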
| 640
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
lowercase_ : int = TestCommand(*_UpperCamelCase )
test_command.run()
lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
assert os.path.exists(_UpperCamelCase )
lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
lowercase_ : Optional[int] = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
if key == "num_bytes":
assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
elif key == "splits":
assert list(_UpperCamelCase ) == list(_UpperCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 640
| 1
|
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
UpperCamelCase__ = '1'
UpperCamelCase__ = '0'
UpperCamelCase__ = '1'
UpperCamelCase__ = ort.SessionOptions()
UpperCamelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
UpperCamelCase__ = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
UpperCamelCase__ = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
UpperCamelCase__ = ort.RunOptions()
UpperCamelCase__ = 128
UpperCamelCase__ = 1
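# Dummy all-ones inputs of shape (batch, sequence) stand in for input_ids, attention_mask and token_type_ids.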
UpperCamelCase__ = np.ones((batch, sequence), dtype=np.intaa)
UpperCamelCase__ = np.ones((batch, sequence), dtype=np.intaa)
UpperCamelCase__ = np.ones((batch, sequence), dtype=np.intaa)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
UpperCamelCase__ = time.time()
UpperCamelCase__ = 2000
UpperCamelCase__ = {}
for _ in range(max_iters):
UpperCamelCase__ = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1000 / max_iters))
| 640
|
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[str] = create_inputs(self.tool.inputs )
lowercase_ : List[str] = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = create_inputs(self.tool.inputs )
lowercase_ : str = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : Optional[int] = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Any = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : Any = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
| 640
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Dict = argparse.ArgumentParser(add_help=_UpperCamelCase , allow_abbrev=_UpperCamelCase )
# The main config parser
lowercase_ : Union[str, Any] = config_command_parser(_UpperCamelCase )
# The subparser to add commands to
lowercase_ : List[str] = config_parser.add_subparsers(title="subcommands" , dest="subcommand" )
# Then add other parsers with the parent parser
default_command_parser(_UpperCamelCase , parents=[parent_parser] )
update_command_parser(_UpperCamelCase , parents=[parent_parser] )
return config_parser
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : Optional[int] = get_config_parser()
lowercase_ : int = config_parser.parse_args()
if not hasattr(_UpperCamelCase , "func" ):
config_parser.print_help()
exit(1 )
# Run
args.func(_UpperCamelCase )
if __name__ == "__main__":
main()
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class _UpperCAmelCase ( snake_case ):
def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
'''simple docstring'''
if latents is None:
lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : str = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 1
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=512 , _UpperCamelCase=512 ):
"""simple docstring"""
lowercase_ : Union[str, Any] = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : str = np.array(pil_image.convert("RGB" ) )
lowercase_ : Optional[int] = arr.astype(np.floataa ) / 127.5 - 1
lowercase_ : int = np.transpose(_UpperCamelCase , [2, 0, 1] )
lowercase_ : str = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
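        # Skip the first (1 - strength) fraction of the schedule so denoising starts from a partially noised image.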
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 1
|
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
UpperCamelCase__ = input('Enter image url: ').strip()
print(f"""Downloading image from {url} ...""")
UpperCamelCase__ = BeautifulSoup(requests.get(url).content, 'html.parser')
# The image URL is in the content field of the first meta tag with property og:image
UpperCamelCase__ = soup.find('meta', {'property': 'og:image'})['content']
UpperCamelCase__ = requests.get(image_url).content
UpperCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, 'wb') as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 640
| 1
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
UpperCamelCase__ = {'target_lang': 'fi', 'source_lang': 'en'}
UpperCamelCase__ = '>>zh<<'
UpperCamelCase__ = 'Helsinki-NLP/'
if is_torch_available():
UpperCamelCase__ = 'pt'
elif is_tf_available():
UpperCamelCase__ = 'tf'
else:
UpperCamelCase__ = 'jax'
@require_sentencepiece
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[int] = MarianTokenizer
__lowerCamelCase: Optional[Any] = False
__lowerCamelCase: Optional[Any] = True
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
lowercase_ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
lowercase_ : Optional[int] = dict(zip(a , range(len(a ) ) ) )
lowercase_ : Any = Path(self.tmpdirname )
save_json(a , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(a , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(a , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(a , save_dir / VOCAB_FILES_NAMES["target_spm"] )
lowercase_ : str = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[int] , **a : Union[str, Any] ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any] ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = "</s>"
lowercase_ : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(a ) , 9 )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
lowercase_ : List[Any] = en_de_tokenizer(["I am a small frog"] , return_tensors=a )
self.assertIsInstance(a , a )
lowercase_ : List[str] = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(a , batch.input_ids[0] )
lowercase_ : str = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(a )
lowercase_ : str = [x.name for x in Path(a ).glob("*" )]
self.assertIn("source.spm" , a )
MarianTokenizer.from_pretrained(a )
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = self.get_tokenizer()
lowercase_ : List[str] = tok(
["I am a small frog" * 1_0_0_0, "I am a small frog"] , padding=a , truncation=a , return_tensors=a )
self.assertIsInstance(a , a )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = self.get_tokenizer()
lowercase_ : Dict = tok(["I am a tiny frog", "I am a small frog"] , padding=a , return_tensors=a )
self.assertIsInstance(a , a )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
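        # fmt: off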
lowercase_ : int = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
lowercase_ : Tuple = "Tämä on testi"
lowercase_ : Any = "This is a test"
lowercase_ : int = [7_6, 7, 2_0_4_7, 2]
lowercase_ : int = [6_9, 1_2, 1_1, 9_4_0, 2]
lowercase_ : Optional[int] = tokenizer(a ).input_ids
self.assertListEqual(a , a )
lowercase_ : Dict = tokenizer(text_target=a ).input_ids
self.assertListEqual(a , a )
lowercase_ : List[Any] = tokenizer.decode(a , skip_special_tokens=a )
self.assertEqual(a , a )
| 640
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 1_0_0
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowercase_ : Union[str, Any] = PriorTransformer(**a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_image_encoder
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Any = self.dummy_tokenizer
lowercase_ : Optional[Any] = self.dummy_image_processor
lowercase_ : List[str] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
lowercase_ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
'''simple docstring'''
if str(a ).startswith("mps" ):
lowercase_ : int = torch.manual_seed(a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = "cpu"
lowercase_ : Any = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**a )
lowercase_ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
lowercase_ : List[Any] = output.image_embeds
lowercase_ : str = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase_ : Any = image[0, -1_0:]
lowercase_ : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowercase_ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = torch_device == "cpu"
lowercase_ : Tuple = True
lowercase_ : str = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = torch_device == "cpu"
lowercase_ : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class _UpperCAmelCase ( snake_case ):
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Union[str, Any] = tempfile.mkdtemp()
lowercase_ : int = 8
# DPR tok
lowercase_ : Optional[int] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(a , exist_ok=a )
lowercase_ : Any = os.path.join(a , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
lowercase_ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowercase_ : int = dict(zip(a , range(len(a ) ) ) )
lowercase_ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase_ : List[str] = {"unk_token": "<unk>"}
lowercase_ : List[str] = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(a , exist_ok=a )
lowercase_ : Optional[Any] = os.path.join(a , BART_VOCAB_FILES_NAMES["vocab_file"] )
lowercase_ : str = os.path.join(a , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 640
|
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Find the next greater element for each entry with explicit index bookkeeping: O(n^2)."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same scan as the slow version, but using enumerate and slicing: still O(n^2)."""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """Stack-based solution: each element is pushed and popped at most once, O(n)."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
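# Illustrative sanity check (not from the original file): all three functions agree,
# e.g. next_greatest_element([2, 1, 5]) == [5, 5, -1] -- 5 follows both 2 and 1,
# and nothing after the final 5 is greater.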
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 640
| 1
|
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for str() conversion"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for len()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for * operator (scaling and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix str() conversion"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self) -> None:
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1))

    def test__add__matrix(self) -> None:
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self) -> None:
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
| 640
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32,
        intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000,
        max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True,
        bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 640
| 1
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
        bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
        pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors,
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
return encoded_inputs
| 640
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Vertex of a weighted, undirected graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    # add the neighbors (vertices are 1-indexed in the caller's view):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum key: O(mn)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm using a binary heap: O((m + n) log n)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    """Doctest placeholder for prim() and prim_heap()."""
if __name__ == "__main__":
import doctest
doctest.testmod()
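# Minimal usage sketch (graph data invented for illustration):
#   G = [Vertex(i) for i in range(3)]
#   connect(G, 1, 2, 15)
#   connect(G, 1, 3, 12)
#   connect(G, 2, 3, 13)
#   prim(G[:], G[0])  # pairs each non-root vertex with its MST parent
# Both prim() and prim_heap() would pick the cheap edges of weight 12 and 13 here.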
| 640
| 1
|
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity of two strings, a value in [0, 1]."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
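# Reference values, assuming the standard Jaro-Winkler definition implemented above:
#   jaro_winkler("hello", "hello") == 1.0
#   jaro_winkler("martha", "marhta") ~= 0.9611 (one transposition, 3-char common prefix)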
| 640
|
'''simple docstring'''
def odd_even_sort(input_list: list) -> list:
    """Sort in place by alternately bubbling even- and odd-indexed neighbour pairs."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]
    # inputting the elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
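# Illustrative run: odd_even_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5];
# each outer iteration performs one even-index pass and one odd-index pass of swaps.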
| 640
| 1
|
'''simple docstring'''
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """0/1 knapsack by plain recursion: either skip item `index` or take it."""
    if index == number_of_items:
        return 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
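# Illustrative call (weights/values invented): with weights [1, 2, 4, 5],
# values [5, 4, 8, 6] and max_weight 5,
#   knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13  # take items 0 and 2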
| 640
|
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four small demo adjacency lists, selected by index."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Find all bridges of an undirected graph via Tarjan-style low-link values."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
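# Illustrative check: in get_demo_graph(0) the only bridges are (2, 3), (3, 4)
# and (2, 5); every other edge lies on a cycle, so removing it keeps the graph
# connected.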
| 640
| 1
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
            thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0,
            prediction_type="epsilon", variance_type="learned_range",
        )
torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02,
        )
torch.manual_seed(0 )
        watermarker = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 640
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
@classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
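# Note: alpha_bar above is the "squaredcos_cap_v2" (Glide cosine) schedule,
#   alpha_bar(t) = cos(((t + 0.008) / 1.008) * pi / 2) ** 2
# and each beta_i = 1 - alpha_bar(t2) / alpha_bar(t1), clipped at max_beta.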
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray
@classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
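# add_noise_common implements the diffusion forward process
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# and get_velocity_common the corresponding v-prediction target
#   v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0.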
| 640
| 1
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 640
|
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation: repeatedly pick the vertex covering the most remaining edges."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem,
            # remove argmax from elem's adjacency list and update its rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 640
| 1
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Clean one modality section of the model toc: dedupe entries and sort by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
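# Typical invocation (the script location is an assumption, matching the usual
# transformers repo layout):
#   python utils/check_doc_toc.py                      # check only
#   python utils/check_doc_toc.py --fix_and_overwrite  # re-sort the toc in place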
| 640
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 640
| 1
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
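# Example invocation (the file names are placeholders and the script name is an
# assumption, not confirmed by this file):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5.ckpt --dump_path ./sd-v1-5-diffusers --half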
| 640
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise a PyTorch model matching the TF checkpoint's configuration
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
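# Example invocation (the paths are placeholders and the script name is an
# assumption, not confirmed by this file):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sts-b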
| 640
| 1
|
'''simple docstring'''
def z_function(input_str: str) -> list[int]:
    """Z-array: z[i] is the length of the longest prefix of the string starting at i."""
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check if the match at position i can be extended by one character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of `pattern` in `input_str` using the Z-array."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the Z-value is at least the pattern length,
        # this index starts a substring equal to the pattern
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
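# Illustrative checks: z_function("aaaa") == [0, 3, 2, 1], and
# find_pattern("abr", "abracadabra") == 2 ("abr" starts at indices 0 and 7).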
| 640
|
'''simple docstring'''
def is_power_of_two(number: int) -> bool:
    """Return True if the non-negative integer `number` is a power of two."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
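# Bit trick: a power of two has a single set bit, so n & (n - 1) clears it,
# e.g. 8 (0b1000) & 7 (0b0111) == 0. Note 0 & -1 == 0 as well, so this
# implementation also reports 0 as a power of two.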
| 640
| 1
|
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
UpperCamelCase__ = TypeVar('KEY')
UpperCamelCase__ = TypeVar('VAL')
@dataclass(frozen=snake_case , slots=snake_case )
class _UpperCAmelCase ( Generic[KEY, VAL] ):
__lowerCamelCase: KEY
__lowerCamelCase: VAL
class _UpperCAmelCase ( _Item ):
def __init__( self : Dict ):
'''simple docstring'''
super().__init__(a , a )
def __bool__( self : Optional[Any] ):
'''simple docstring'''
return False
UpperCamelCase__ = _DeletedItem()
class _UpperCAmelCase ( MutableMapping[KEY, VAL] ):
def __init__( self : Optional[int] , a : int = 8 , a : float = 0.75 ):
'''simple docstring'''
lowercase_ : List[str] = initial_block_size
lowercase_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowercase_ : str = capacity_factor
lowercase_ : Dict = 0
def lowerCAmelCase__ ( self : List[Any] , a : KEY ):
'''simple docstring'''
return hash(a ) % len(self._buckets )
def lowerCAmelCase__ ( self : Dict , a : int ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def lowerCAmelCase__ ( self : Any , a : int , a : KEY , a : VAL ):
'''simple docstring'''
lowercase_ : str = self._buckets[ind]
if not stored:
lowercase_ : Tuple = _Item(a , a )
self._len += 1
return True
elif stored.key == key:
lowercase_ : str = _Item(a , a )
return True
else:
return False
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowercase_ : Optional[Any] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def lowerCAmelCase__ ( self : List[str] , a : int ):
'''simple docstring'''
lowercase_ : str = self._buckets
lowercase_ : Tuple = [None] * new_size
lowercase_ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def lowerCAmelCase__ ( self : Dict , a : KEY ):
'''simple docstring'''
lowercase_ : List[str] = self._get_bucket_index(a )
for _ in range(len(self._buckets ) ):
yield ind
lowercase_ : List[Any] = self._get_next_ind(a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : KEY , a : VAL ):
'''simple docstring'''
for ind in self._iterate_buckets(a ):
if self._try_set(a , a , a ):
break
def __setitem__( self : Optional[int] , a : KEY , a : VAL ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(a , a )
def __delitem__( self : Optional[Any] , a : KEY ):
'''simple docstring'''
for ind in self._iterate_buckets(a ):
lowercase_ : int = self._buckets[ind]
if item is None:
raise KeyError(a )
if item is _deleted:
continue
if item.key == key:
lowercase_ : Any = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Dict , a : KEY ):
'''simple docstring'''
for ind in self._iterate_buckets(a ):
lowercase_ : Optional[int] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(a )
def __len__( self : List[str] ):
'''simple docstring'''
return self._len
def __iter__( self : Optional[Any] ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Optional[Any] = " ,".join(
f"""{item.key}: {item.val}""" for item in self._buckets if item )
return f"""HashMap({val_string})"""
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCamelCase__ = 50003
UpperCamelCase__ = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[int] = PLBartTokenizer
__lowerCamelCase: Any = None
__lowerCamelCase: Dict = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : str = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
self.assertListEqual(
a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : List[Any] = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
__lowerCamelCase: Tuple = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__lowerCamelCase: List[str] = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__lowerCamelCase: List[str] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase__ ( cls : str ):
'''simple docstring'''
lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
lowercase_ : List[str] = 1
return cls
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertIn(a , self.tokenizer.all_special_ids )
lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , a )
lowercase_ : Tuple = 1_0
lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = tempfile.mkdtemp()
lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(a , a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
lowercase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
lowercase_ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
lowercase_ : Dict = targets["input_ids"]
lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
'''simple docstring'''
UpperCamelCase__ = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCamelCase__ = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCamelCase__ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
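# Sketch of the idea behind the lazy module above (a simplified assumption, not
# the real `_LazyModule` implementation): a module subclass that resolves
# symbols to their submodules on first attribute access, keeping the package
# import itself cheap.
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # maps submodule name -> list of public symbols it provides
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")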
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _UpperCAmelCase :
__lowerCamelCase: List[str]
__lowerCamelCase: Optional[str] = None
# Automatically constructed
__lowerCamelCase: ClassVar[str] = "dict"
__lowerCamelCase: ClassVar[Any] = None
__lowerCamelCase: str = field(default='Translation' , init=snake_case , repr=snake_case )
def __call__( self : Any ):
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class _UpperCAmelCase :
__lowerCamelCase: Optional[List] = None
__lowerCamelCase: Optional[int] = None
__lowerCamelCase: Optional[str] = None
# Automatically constructed
__lowerCamelCase: ClassVar[str] = "dict"
__lowerCamelCase: ClassVar[Any] = None
__lowerCamelCase: str = field(default='TranslationVariableLanguages' , init=snake_case , repr=snake_case )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Union[str, Any] = sorted(set(self.languages ) ) if self.languages else None
lowercase_ : Union[str, Any] = len(self.languages ) if self.languages else None
def __call__( self : Any ):
'''simple docstring'''
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
def lowerCAmelCase__ ( self : Union[str, Any] , a : int ):
'''simple docstring'''
lowercase_ : str = set(self.languages )
if self.languages and set(a ) - lang_set:
raise ValueError(
f"""Some languages in example ({", ".join(sorted(set(a ) - lang_set ) )}) are not in valid set ({", ".join(a )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowercase_ : Any = []
for lang, text in translation_dict.items():
if isinstance(a , a ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowercase_ , lowercase_ : int = zip(*sorted(a ) )
return {"language": languages, "translation": translations}
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
}
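# Worked example of the flattening performed by the variable-languages feature
# above (standalone restatement for clarity): a dict of language -> string or
# list of strings becomes two parallel, language-sorted lists.
if __name__ == "__main__":
    example = {"en": "the cat", "fr": ["le chat", "la chatte"]}
    pairs: list[tuple[str, str]] = []
    for lang, text in example.items():
        pairs.extend((lang, t) for t in ([text] if isinstance(text, str) else text))
    languages, translations = zip(*sorted(pairs))
    print({"language": list(languages), "translation": list(translations)})
    # -> {'language': ['en', 'fr', 'fr'],
    #     'translation': ['the cat', 'le chat', 'la chatte']}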
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'linear'
__lowerCamelCase: Any = 'cosine'
__lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
__lowerCamelCase: Tuple = 'polynomial'
__lowerCamelCase: int = 'constant'
__lowerCamelCase: Optional[Any] = 'constant_with_warmup'
__lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Dict = step_rules.split("," )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ : Any = rule_str.split(":" )
lowercase_ : List[Any] = int(_UpperCamelCase )
lowercase_ : int = float(_UpperCamelCase )
lowercase_ : Optional[int] = value
lowercase_ : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase , _UpperCamelCase ):
def rule_func(_UpperCamelCase ) -> float:
lowercase_ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowercase_ : Optional[int] = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
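# Numeric sketch of the linear-with-warmup multiplier implemented above: the
# learning-rate factor ramps 0 -> 1 over the warmup steps, then decays linearly
# to 0 at the end of training.
if __name__ == "__main__":

    def _linear_lambda(step: int, warmup: int = 10, total: int = 100) -> float:
        if step < warmup:
            return step / max(1, warmup)
        return max(0.0, (total - step) / max(1, total - warmup))

    print([round(_linear_lambda(s), 2) for s in (0, 5, 10, 55, 100)])
    # -> [0.0, 0.5, 1.0, 0.5, 0.0]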
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case )
class _UpperCAmelCase ( snake_case ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__lowerCamelCase: str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
__lowerCamelCase: ClassVar[Features] = Features({'text': Value('string' )} )
__lowerCamelCase: ClassVar[Features] = Features({'summary': Value('string' )} )
__lowerCamelCase: str = "text"
__lowerCamelCase: str = "summary"
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return {self.text_column: "text", self.summary_column: "summary"}
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 200 ):
"""simple docstring"""
lowercase_ : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 200]
lowercase_ : str = [0] * (pence + 1)
lowercase_ : Dict = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
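# Worked trace for a small hypothetical case, pence=5 with coins [1, 2, 5]:
#   start        -> [1, 0, 0, 0, 0, 0]
#   after coin 1 -> [1, 1, 1, 1, 1, 1]
#   after coin 2 -> [1, 1, 2, 2, 3, 3]
#   after coin 5 -> [1, 1, 2, 2, 3, 4]
# i.e. 4 ways to make 5 pence: 5, 2+2+1, 2+1+1+1 and 1+1+1+1+1. Iterating coins
# in the outer loop counts combinations rather than ordered sequences.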
if __name__ == "__main__":
assert solution(200) == 73682
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: List[str] = PegasusTokenizer
__lowerCamelCase: Tuple = PegasusTokenizerFast
__lowerCamelCase: List[str] = True
__lowerCamelCase: Union[str, Any] = True
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Tuple = PegasusTokenizer(a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowerCAmelCase__ ( self : Tuple , **a : int ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] ):
'''simple docstring'''
return ("This is a test", "This is a test")
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Optional[Any] = "</s>"
lowercase_ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(a ) , 1_1_0_3 )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase_ : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase_ : str = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
lowercase_ : str = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
lowercase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
self.assertListEqual(a , a )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase_ : Optional[int] = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
lowercase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowercase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
self.assertListEqual(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowercase_ : Optional[int] = "To ensure a smooth flow of bank resolutions."
lowercase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowercase_ : str = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
self.assertListEqual(a , a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = ["This is going to be way too long." * 1_5_0, "short example"]
lowercase_ : Optional[int] = ["not super long but more than 5 tokens", "tiny"]
lowercase_ : Any = self._large_tokenizer(a , padding=a , truncation=a , return_tensors="pt" )
lowercase_ : Union[str, Any] = self._large_tokenizer(
text_target=a , max_length=5 , padding=a , truncation=a , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(a ) == 2 # input_ids, attention_mask.
@slow
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = {"input_ids": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: int = PegasusTokenizer
__lowerCamelCase: List[str] = PegasusTokenizerFast
__lowerCamelCase: Any = True
__lowerCamelCase: Optional[int] = True
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Any = PegasusTokenizer(a , offset=0 , mask_token_sent=a , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowerCAmelCase__ ( self : str , **a : Any ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )
def lowerCAmelCase__ ( self : Optional[int] , a : Tuple ):
'''simple docstring'''
return ("This is a test", "This is a test")
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase_ : Dict = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase_ : Any = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
lowercase_ : Dict = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
lowercase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
self.assertListEqual(a , a )
@require_torch
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Optional[int] = ["This is going to be way too long." * 1_0_0_0, "short example"]
lowercase_ : List[str] = ["not super long but more than 5 tokens", "tiny"]
lowercase_ : str = self._large_tokenizer(a , padding=a , truncation=a , return_tensors="pt" )
lowercase_ : Union[str, Any] = self._large_tokenizer(
text_target=a , max_length=5 , padding=a , truncation=a , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(a ) == 2 # input_ids, attention_mask.
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
lowercase_ : Dict = self._large_tokenizer(a ).input_ids
self.assertListEqual(
a , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCAmelCase ( snake_case ):
def __init__( self : Tuple , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[str] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowercase_ : str = field
lowercase_ : Optional[Any] = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
lowercase_ : Any = Json(
cache_dir=a , data_files=a , features=a , field=a , **a , )
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if self.streaming:
lowercase_ : Any = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase_ : Dict = None
lowercase_ : Optional[int] = None
lowercase_ : str = None
lowercase_ : str = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowercase_ : int = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class _UpperCAmelCase :
def __init__( self : str , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , a : Optional[int] = None , **a : List[Any] , ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
lowercase_ : Dict = dataset
lowercase_ : Optional[int] = path_or_buf
lowercase_ : List[Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase_ : Optional[Any] = num_proc
lowercase_ : List[Any] = "utf-8"
lowercase_ : List[str] = to_json_kwargs
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : str = self.to_json_kwargs.pop("path_or_buf" , a )
lowercase_ : Any = self.to_json_kwargs.pop("orient" , "records" )
lowercase_ : Any = self.to_json_kwargs.pop("lines" , True if orient == "records" else False )
lowercase_ : List[str] = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True )
lowercase_ : int = self.to_json_kwargs.pop("compression" , a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , "wb" , compression=a ) as buffer:
lowercase_ : Dict = self._write(file_obj=a , orient=a , lines=a , index=a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
" was passed. Please provide a local path instead." )
lowercase_ : Dict = self._write(
file_obj=self.path_or_buf , orient=a , lines=a , index=a , **self.to_json_kwargs )
return written
def lowerCAmelCase__ ( self : Optional[int] , a : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = args
lowercase_ : Optional[int] = query_table(
table=self.dataset.data , key=slice(a , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase_ : Dict = batch.to_pandas().to_json(
path_or_buf=a , orient=a , lines=a , index=a , **a )
if not json_str.endswith("\n" ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase__ ( self : int , a : BinaryIO , a : int , a : str , a : Union[str, Any] , **a : str , ):
'''simple docstring'''
lowercase_ : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
lowercase_ : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(a )
else:
lowercase_ , lowercase_ : Any = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , a , a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ):
written += file_obj.write(a )
return written
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=snake_case ):
__lowerCamelCase: List[str] = ['speech']
def __init__( self : Optional[int] , *a : List[Any] , **a : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["speech"] )
class _UpperCAmelCase ( metaclass=snake_case ):
__lowerCamelCase: List[str] = ['speech']
def __init__( self : List[str] , *a : Tuple , **a : str ):
'''simple docstring'''
requires_backends(self , ["speech"] )
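# The classes above follow the dummy-object pattern (assumed semantics): when
# the optional `speech` backend is not installed, instantiating one of these
# placeholders calls `requires_backends`, which raises a clear ImportError at
# use time instead of breaking the top-level package import.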
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square(_UpperCamelCase , _UpperCamelCase ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase_ : List[str] = update_area_of_max_square(_UpperCamelCase , col + 1 )
lowercase_ : List[Any] = update_area_of_max_square(row + 1 , col + 1 )
lowercase_ : Tuple = update_area_of_max_square(row + 1 , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Optional[int] = 1 + min([right, diagonal, down] )
lowercase_ : Any = max(largest_square_area[0] , _UpperCamelCase )
return sub_problem_sol
else:
return 0
lowercase_ : int = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase_ : Dict = update_area_of_max_square_using_dp_array(_UpperCamelCase , col + 1 , _UpperCamelCase )
lowercase_ : str = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _UpperCamelCase )
lowercase_ : Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , _UpperCamelCase , _UpperCamelCase )
if mat[row][col]:
lowercase_ : Tuple = 1 + min([right, diagonal, down] )
lowercase_ : int = max(largest_square_area[0] , _UpperCamelCase )
lowercase_ : Dict = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase_ : Any = [0]
lowercase_ : Optional[int] = [[-1] * cols for _ in range(_UpperCamelCase )]
update_area_of_max_square_using_dp_array(0 , 0 , _UpperCamelCase )
return largest_square_area[0]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : str = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase_ : List[Any] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Tuple = dp_array[row][col + 1]
lowercase_ : List[str] = dp_array[row + 1][col + 1]
lowercase_ : List[Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase_ : Any = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = max(dp_array[row][col] , _UpperCamelCase )
else:
lowercase_ : int = 0
return largest_square_area
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = [0] * (cols + 1)
lowercase_ : Union[str, Any] = [0] * (cols + 1)
lowercase_ : int = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase_ : Dict = current_row[col + 1]
lowercase_ : List[Any] = next_row[col + 1]
lowercase_ : Tuple = next_row[col]
if mat[row][col] == 1:
lowercase_ : Dict = 1 + min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : int = max(current_row[col] , _UpperCamelCase )
else:
lowercase_ : Tuple = 0
lowercase_ : Optional[Any] = current_row
return largest_square_area
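def _largest_side_demo(mat: list[list[int]]) -> int:
    """Illustrative restatement of the bottom-up recurrence used above, added
    as a sketch (not one of the original four variants). Note that the value
    tracked is the *side length* of the largest all-ones square, computed as
    1 + min(right, diagonal, down) for each cell that contains a 1.

    >>> _largest_side_demo([[1, 1, 0], [1, 1, 1], [0, 1, 1]])
    2
    """
    rows, cols = len(mat), len(mat[0])
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            if mat[row][col]:
                dp[row][col] = 1 + min(dp[row][col + 1], dp[row + 1][col + 1], dp[row + 1][col])
                best = max(best, dp[row][col])
    return best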
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _UpperCAmelCase ( snake_case ):
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
lowercase_ : Optional[int] = 5
# Realm tok
lowercase_ : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase_ : int = os.path.join(self.tmpdirname , "realm_tokenizer" )
os.makedirs(a , exist_ok=a )
lowercase_ : Union[str, Any] = os.path.join(a , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowercase_ : List[Any] = os.path.join(self.tmpdirname , "realm_block_records" )
os.makedirs(a , exist_ok=a )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = RealmConfig(num_block_records=self.num_block_records )
return config
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : List[str] = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
] , dtype=a , )
return block_records
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Optional[Any] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Dict = self.get_config()
lowercase_ : Any = self.get_dummy_retriever()
lowercase_ : Tuple = retriever.tokenizer
lowercase_ : str = np.array([0, 3] , dtype="long" )
lowercase_ : int = tokenizer(["Test question"] ).input_ids
lowercase_ : List[Any] = tokenizer(
["the fourth"] , add_special_tokens=a , return_token_type_ids=a , return_attention_mask=a , ).input_ids
lowercase_ : Union[str, Any] = config.reader_seq_len
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = retriever(
a , a , answer_ids=a , max_length=a , return_tensors="np" )
self.assertEqual(len(a ) , 2 )
self.assertEqual(len(a ) , 2 )
self.assertEqual(len(a ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : Tuple = self.get_config()
lowercase_ : Optional[int] = self.get_dummy_retriever()
lowercase_ : int = retriever.tokenizer
lowercase_ : Tuple = np.array([0, 3, 5] , dtype="long" )
lowercase_ : List[str] = tokenizer(["Test question"] ).input_ids
lowercase_ : Dict = tokenizer(
["the fourth", "longer longer"] , add_special_tokens=a , return_token_type_ids=a , return_attention_mask=a , ).input_ids
lowercase_ : Union[str, Any] = config.reader_seq_len
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = retriever(
a , a , answer_ids=a , max_length=a , return_tensors="np" )
self.assertEqual([False, True, True] , a )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , a )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , a )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
# Test local path
lowercase_ : str = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
# Test mocked remote path
with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
lowercase_ : str = os.path.join(
os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
lowercase_ : List[Any] = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
self.assertEqual(retriever.block_records[0] , b"This is the first record" )
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase__ = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
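# e.g. 1009 vs 1000 differs by 0.9% -> True, while 1015 vs 1000 differs by
# 1.5% -> False; used below to compare byte counts that may drift slightly
# between runs.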
@pytest.mark.integration
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = _TestCommandArgs(dataset=_UpperCamelCase , all_configs=_UpperCamelCase , save_infos=_UpperCamelCase )
lowercase_ : int = TestCommand(*_UpperCamelCase )
test_command.run()
lowercase_ : List[str] = os.path.join(_UpperCamelCase , "README.md" )
assert os.path.exists(_UpperCamelCase )
lowercase_ : Any = DatasetInfosDict.from_directory(_UpperCamelCase )
lowercase_ : Optional[int] = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 235_1563,
"num_examples": 1_0000,
},
{
"name": "validation",
"num_bytes": 23_8418,
"num_examples": 1000,
},
] , download_size=394_0680 , dataset_size=258_9981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
lowercase_ , lowercase_ : Optional[int] = getattr(dataset_infos["default"] , _UpperCamelCase ), getattr(expected_dataset_infos["default"] , _UpperCamelCase )
if key == "num_bytes":
assert is_apercent_close(_UpperCamelCase , _UpperCamelCase )
elif key == "splits":
assert list(_UpperCamelCase ) == list(_UpperCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
def __init__( self : Dict , *a : Optional[int] , **a : Any ):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , a , )
super().__init__(*a , **a )
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
UpperCamelCase__ = ['text', 'image', 'audio']
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
inputs.append(create_inputs(_UpperCamelCase ) )
else:
raise ValueError(F"""Invalid type requested: {input_type}""" )
return inputs
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = []
for output in outputs:
if isinstance(_UpperCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_UpperCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_UpperCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"""Invalid output: {output}""" )
return output_types
@is_tool_test
class _UpperCAmelCase :
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "inputs" ) )
self.assertTrue(hasattr(self.tool , "outputs" ) )
lowercase_ : Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , a ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowercase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : List[str] = create_inputs(self.tool.inputs )
lowercase_ : List[str] = self.tool(*a )
# There is a single output
if len(self.tool.outputs ) == 1:
lowercase_ : Union[str, Any] = [outputs]
self.assertListEqual(output_types(a ) , self.tool.outputs )
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , "description" ) )
self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ : Any = create_inputs(self.tool.inputs )
lowercase_ : str = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
for output, output_type in zip(a , self.tool.outputs ):
lowercase_ : int = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(a , a ) )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ : Dict = create_inputs(self.tool.inputs )
lowercase_ : Optional[int] = []
for _input, input_type in zip(a , self.tool.inputs ):
if isinstance(a , a ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowercase_ : Any = self.tool(*a )
if not isinstance(a , a ):
lowercase_ : Any = [outputs]
self.assertEqual(len(a ) , len(self.tool.outputs ) )
'''simple docstring'''
import math
UpperCamelCase__ = 10
UpperCamelCase__ = 7
UpperCamelCase__ = BALLS_PER_COLOUR * NUM_COLOURS
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 20 ):
"""simple docstring"""
lowercase_ : Dict = math.comb(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _UpperCamelCase )
lowercase_ : Tuple = NUM_COLOURS * (1 - missing_colour / total)
return F"""{result:.9f}"""
if __name__ == "__main__":
print(solution(20))
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
"""simple docstring"""
lowercase_ : int = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowercase_ : str = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
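# e.g. with scale_factor=8: h=768 gives 768 // 64 = 12 -> 12 * 8 = 96, while
# h=700 leaves a remainder and rounds up, 700 // 64 + 1 = 11 -> 11 * 8 = 88;
# i.e. the helper returns ceil(dim / scale_factor**2) * scale_factor per axis.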
class _UpperCAmelCase ( snake_case ):
def __init__( self : int , a : MultilingualCLIP , a : XLMRobertaTokenizer , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=a , tokenizer=a , unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : List[Any] , a : Tuple , a : List[str] , a : Optional[Any] , a : str , a : Tuple , a : List[str] ):
'''simple docstring'''
if latents is None:
lowercase_ : List[str] = randn_tensor(a , generator=a , device=a , dtype=a )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase_ : Optional[int] = latents.to(a )
lowercase_ : str = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self : Optional[Any] , a : List[str] , a : List[Any] , a : Union[str, Any] , a : str , a : Tuple=None , ):
'''simple docstring'''
lowercase_ : Tuple = len(a ) if isinstance(a , a ) else 1
# get prompt text embeddings
lowercase_ : Any = self.tokenizer(
a , padding="max_length" , truncation=a , max_length=7_7 , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : Union[str, Any] = text_inputs.input_ids
lowercase_ : Tuple = self.tokenizer(a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(a , a ):
lowercase_ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowercase_ : List[str] = text_input_ids.to(a )
lowercase_ : int = text_inputs.attention_mask.to(a )
lowercase_ , lowercase_ : Optional[int] = self.text_encoder(
input_ids=a , attention_mask=a )
lowercase_ : str = prompt_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = text_encoder_hidden_states.repeat_interleave(a , dim=0 )
lowercase_ : int = text_mask.repeat_interleave(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str]
if negative_prompt is None:
lowercase_ : int = [""] * batch_size
elif type(a ) is not type(a ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(a )} !="""
f""" {type(a )}.""" )
elif isinstance(a , a ):
lowercase_ : Tuple = [negative_prompt]
elif batch_size != len(a ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(a )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
" the batch size of `prompt`." )
else:
lowercase_ : Dict = negative_prompt
lowercase_ : str = self.tokenizer(
a , padding="max_length" , max_length=7_7 , truncation=a , return_attention_mask=a , add_special_tokens=a , return_tensors="pt" , )
lowercase_ : List[Any] = uncond_input.input_ids.to(a )
lowercase_ : Optional[int] = uncond_input.attention_mask.to(a )
lowercase_ , lowercase_ : int = self.text_encoder(
input_ids=a , attention_mask=a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase_ : List[str] = negative_prompt_embeds.shape[1]
lowercase_ : Dict = negative_prompt_embeds.repeat(1 , a )
lowercase_ : Optional[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , a )
lowercase_ : Any = uncond_text_encoder_hidden_states.shape[1]
lowercase_ : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , a , 1 )
lowercase_ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , a , -1 )
lowercase_ : List[Any] = uncond_text_mask.repeat_interleave(a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase_ : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowercase_ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowercase_ : Any = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def lowerCAmelCase__ ( self : Tuple , a : Optional[Any]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : str = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Union[str, Any] , a : List[str]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(a , a , prev_module_hook=a )
if self.safety_checker is not None:
lowercase_ , lowercase_ : Optional[int] = cpu_offload_with_hook(self.safety_checker , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Tuple , a : Union[str, List[str]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Optional[Union[str, List[str]]] = None , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[torch.FloatTensor] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , a ):
lowercase_ : List[str] = 1
elif isinstance(a , a ):
lowercase_ : int = len(a )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a )}""" )
lowercase_ : Tuple = self._execution_device
lowercase_ : Dict = batch_size * num_images_per_prompt
lowercase_ : Dict = guidance_scale > 1.0
lowercase_ , lowercase_ , lowercase_ : List[str] = self._encode_prompt(
a , a , a , a , a )
if isinstance(a , a ):
lowercase_ : Optional[int] = torch.cat(a , dim=0 )
if isinstance(a , a ):
lowercase_ : int = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[int] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : str = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=a )
self.scheduler.set_timesteps(a , device=a )
lowercase_ : List[str] = self.scheduler.timesteps
lowercase_ : str = self.unet.config.in_channels
lowercase_ , lowercase_ : int = get_new_h_w(a , a , self.movq_scale_factor )
# create initial latent
lowercase_ : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , a , a , a , self.scheduler , )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : Optional[int] = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[Any] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : int = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : str = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Tuple = self.scheduler.step(
a , a , a , generator=a , ).prev_sample
# post-processing
lowercase_ : Union[str, Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : List[Any] = image * 0.5 + 0.5
lowercase_ : Optional[int] = image.clamp(0 , 1 )
lowercase_ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : List[str] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
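# Editor's note on the guidance block in __call__ above (a reading aid, not
# original code): the UNet predicts noise and a variance map concatenated
# along the channel axis, so the output is split first; guidance mixes only
# the unconditional and text-conditioned noise halves, and the text branch's
# variance is re-attached before the scheduler step. For schedulers whose
# variance_type is not "learned"/"learned_range", the variance channels are
# stripped instead.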
| 640
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    """simple docstring"""
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
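# Editor's note (sketch under an assumed PIL input): prepare_image resizes
# with bicubic resampling, maps uint8 RGB into [-1, 1] via x / 127.5 - 1,
# reorders HWC -> CHW and adds a batch axis, e.g.
#   prepare_image(Image.open("frog.png"), w=768, h=768).shape
#   -> torch.Size([1, 3, 768, 768])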
class _UpperCAmelCase ( snake_case ):
def __init__( self : List[Any] , a : UNetaDConditionModel , a : DDPMScheduler , a : VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=a , scheduler=a , movq=a , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self : Union[str, Any] , a : Tuple , a : List[str] , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = min(int(num_inference_steps * strength ) , a )
lowercase_ : str = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
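    # Editor's note: get_timesteps above is the usual img2img truncation. With
    # num_inference_steps=100 and strength=0.3, init_timestep = 30 and
    # t_start = 70, so denoising only runs over the final 30 scheduler
    # timesteps; strength=1.0 would re-noise the input completely and use the
    # full schedule.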
def lowerCAmelCase__ ( self : Union[str, Any] , a : int , a : List[Any] , a : Tuple , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any]=None ):
'''simple docstring'''
if not isinstance(a , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a )}""" )
lowercase_ : str = image.to(device=a , dtype=a )
lowercase_ : Any = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(a )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(a , a ):
lowercase_ : str = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
lowercase_ : List[Any] = torch.cat(a , dim=0 )
else:
lowercase_ : Tuple = self.movq.encode(a ).latent_dist.sample(a )
lowercase_ : Union[str, Any] = self.movq.config.scaling_factor * init_latents
lowercase_ : Tuple = torch.cat([init_latents] , dim=0 )
lowercase_ : List[Any] = init_latents.shape
lowercase_ : Union[str, Any] = randn_tensor(a , generator=a , device=a , dtype=a )
# get latents
lowercase_ : Dict = self.scheduler.add_noise(a , a , a )
lowercase_ : Tuple = init_latents
return latents
def lowerCAmelCase__ ( self : List[Any] , a : str=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase_ : Optional[Any] = torch.device(f"""cuda:{gpu_id}""" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a , a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[str, Any]=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase_ : Any = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Union[str, Any] = cpu_offload_with_hook(a , a , prev_module_hook=a )
# We'll offload the last model manually.
lowercase_ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a )
def __call__( self : Optional[int] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , a : Union[torch.FloatTensor, List[torch.FloatTensor]] , a : int = 5_1_2 , a : int = 5_1_2 , a : int = 1_0_0 , a : float = 4.0 , a : float = 0.3 , a : int = 1 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
lowercase_ : Optional[int] = self._execution_device
lowercase_ : Dict = guidance_scale > 1.0
if isinstance(a , a ):
lowercase_ : Dict = torch.cat(a , dim=0 )
lowercase_ : Dict = image_embeds.shape[0]
if isinstance(a , a ):
lowercase_ : str = torch.cat(a , dim=0 )
if do_classifier_free_guidance:
lowercase_ : Optional[Any] = image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = negative_image_embeds.repeat_interleave(a , dim=0 )
lowercase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a )
if not isinstance(a , a ):
lowercase_ : List[Any] = [image]
if not all(isinstance(a , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"""Input is in incorrect format: {[type(a ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
lowercase_ : List[Any] = torch.cat([prepare_image(a , a , a ) for i in image] , dim=0 )
lowercase_ : List[Any] = image.to(dtype=image_embeds.dtype , device=a )
lowercase_ : Optional[int] = self.movq.encode(a )["latents"]
lowercase_ : Dict = latents.repeat_interleave(a , dim=0 )
self.scheduler.set_timesteps(a , device=a )
lowercase_ , lowercase_ : List[Any] = self.get_timesteps(a , a , a )
lowercase_ : Tuple = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Optional[Any] = downscale_height_and_width(a , a , self.movq_scale_factor )
lowercase_ : Tuple = self.prepare_latents(
a , a , a , a , image_embeds.dtype , a , a )
for i, t in enumerate(self.progress_bar(a ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : int = {"image_embeds": image_embeds}
lowercase_ : Optional[int] = self.unet(
sample=a , timestep=a , encoder_hidden_states=a , added_cond_kwargs=a , return_dict=a , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : int = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Any = variance_pred.chunk(2 )
lowercase_ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
a , a , a , generator=a , )[0]
# post-processing
lowercase_ : Optional[Any] = self.movq.decode(a , force_not_quantize=a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase_ : Tuple = image * 0.5 + 0.5
lowercase_ : Any = image.clamp(0 , 1 )
lowercase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : Tuple = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a )
| 640
| 1
|
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCamelCase__ = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
UpperCamelCase__ = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
UpperCamelCase__ = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    """simple docstring"""
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
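# Editor's note, a small worked reading of precision_at_10 (hypothetical
# data): row i of `sim` holds cosine distances from English sentence vector i
# to every Indic sentence vector, and the score is the fraction of rows whose
# own index lands among the 10 nearest neighbours. For the two-pair example
# in the module docstring the true match is trivially in the top 10, hence
# precision@10 = 1.0.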
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    def _info(self):
        '''simple docstring'''
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
                "\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
                "\"wiki-ner\"]" )
| 640
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: str = ['image_processor', 'tokenizer']
__lowerCamelCase: Dict = 'Pix2StructImageProcessor'
__lowerCamelCase: Union[str, Any] = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : str , a : Dict , a : List[str] ):
'''simple docstring'''
lowercase_ : Optional[Any] = False
super().__init__(a , a )
def __call__( self : Tuple , a : int=None , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : Optional[int] = 2_0_4_8 , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : List[str] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowercase_ : Dict = self.tokenizer
lowercase_ : Tuple = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowercase_ : Optional[int] = self.image_processor(
a , return_tensors=a , max_patches=a , **a )
else:
# add pixel_values and bbox
lowercase_ : Any = self.image_processor(
a , return_tensors=a , max_patches=a , header_text=a , **a )
if text is not None and not self.image_processor.is_vqa:
lowercase_ : int = self.tokenizer(
text=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_token_type_ids=a , return_length=a , verbose=a , return_tensors=a , **a , )
if "attention_mask" in text_encoding:
lowercase_ : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
lowercase_ : Dict = text_encoding.pop("input_ids" )
else:
lowercase_ : str = None
if text_encoding is not None:
encoding_image_processor.update(a )
return encoding_image_processor
def lowerCAmelCase__ ( self : Any , *a : str , **a : Tuple ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def lowerCAmelCase__ ( self : str , *a : Optional[int] , **a : Any ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Tuple = self.tokenizer.model_input_names
lowercase_ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
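# Editor's note: a hedged usage sketch of the processor above (checkpoint and
# variable names are illustrative, not from this file):
#   from transformers import Pix2StructProcessor
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   enc = processor(images=image, text="a caption", return_tensors="pt")
#   # -> image patches from the image processor, with the tokenized text
#   #    folded in (as decoder labels for non-VQA checkpoints).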
| 640
| 1
|
'''simple docstring'''
def all_unique_chars(input_str: str) -> bool:
    """simple docstring"""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
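# Editor's note, doctest-style examples (added for illustration):
#   all_unique_chars("ABCDEF") -> True
#   all_unique_chars("ABACUS") -> False  # 'A' repeats
# Note that pow(2, ord(ch)) builds one arbitrarily large integer bitmap over
# the whole Unicode range, so memory use grows with the largest code point
# seen, not with the string length.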
if __name__ == "__main__":
import doctest
doctest.testmod()
| 640
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Dict = KandinskyVaaPriorPipeline
__lowerCamelCase: Optional[int] = ['prompt']
__lowerCamelCase: Any = ['prompt', 'negative_prompt']
__lowerCamelCase: List[Any] = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase: List[Any] = False
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return 3_2
@property
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return 1_0_0
@property
def lowerCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : List[str] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
lowercase_ : Union[str, Any] = PriorTransformer(**a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
lowercase_ : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ : Dict = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
lowercase_ : Optional[Any] = CLIPVisionModelWithProjection(a )
return model
@property
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_2_4 , )
return image_processor
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Any = self.dummy_prior
lowercase_ : Optional[Any] = self.dummy_image_encoder
lowercase_ : List[Any] = self.dummy_text_encoder
lowercase_ : Any = self.dummy_tokenizer
lowercase_ : Optional[Any] = self.dummy_image_processor
lowercase_ : List[str] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
lowercase_ : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def lowerCAmelCase__ ( self : Any , a : Dict , a : Dict=0 ):
'''simple docstring'''
if str(a ).startswith("mps" ):
lowercase_ : int = torch.manual_seed(a )
else:
lowercase_ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowercase_ : Any = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : str = "cpu"
lowercase_ : Any = self.get_dummy_components()
lowercase_ : int = self.pipeline_class(**a )
lowercase_ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase_ : Any = pipe(**self.get_dummy_inputs(a ) )
lowercase_ : List[Any] = output.image_embeds
lowercase_ : str = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase_ : Any = image[0, -1_0:]
lowercase_ : Dict = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
lowercase_ : int = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : int = torch_device == "cpu"
lowercase_ : Tuple = True
lowercase_ : str = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Any = torch_device == "cpu"
lowercase_ : int = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , )
| 640
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = (
'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
)
__lowerCamelCase: List[Any] = 'CIDAS/clipseg-rd64-refined'
__lowerCamelCase: List[str] = 'image_segmenter'
__lowerCamelCase: Dict = CLIPSegForImageSegmentation
__lowerCamelCase: Optional[int] = ['image', 'text']
__lowerCamelCase: Optional[int] = ['image']
def __init__( self : List[str] , *a : Dict , **a : Any ):
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*a , **a )
def lowerCAmelCase__ ( self : int , a : "Image" , a : str ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=a , return_tensors="pt" )
def lowerCAmelCase__ ( self : int , a : Tuple ):
'''simple docstring'''
with torch.no_grad():
lowercase_ : str = self.model(**a ).logits
return logits
def lowerCAmelCase__ ( self : Optional[Any] , a : Tuple ):
'''simple docstring'''
lowercase_ : List[str] = outputs.cpu().detach().numpy()
lowercase_ : Any = 0
lowercase_ : List[Any] = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
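# Editor's note: a hedged usage sketch of the tool above (class and file
# names are assumptions for illustration):
#   from PIL import Image
#   tool = ImageSegmentationTool()  # assumed name of this tool class
#   mask = tool(image=Image.open("street.png"), label="a car")
#   # -> a PIL image whose value-255 pixels mark regions CLIPSeg matched to
#   #    the label, per the zero-threshold in decode() above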
| 640
|
'''simple docstring'''
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    """simple docstring"""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
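# Editor's note: next_greatest_element is the O(n) monotonic-stack variant of
# the two O(n^2) loops above. Scanning from the right, the stack holds a
# decreasing run of candidates; values <= arr[index] are popped because they
# can never be the "next greater" element for anything further left, and each
# element is pushed and popped at most once.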
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
| 640
| 1
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[int] = F"""{sampling_rate}"""
lowercase_ : Tuple = "1"
lowercase_ : Optional[Any] = "f32le"
lowercase_ : Union[str, Any] = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(_UpperCamelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
lowercase_ : Tuple = ffmpeg_process.communicate(_UpperCamelCase )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
lowercase_ : Optional[int] = output_stream[0]
    lowercase_ : List[str] = np.frombuffer(_UpperCamelCase , np.float32 )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
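# Editor's note: ffmpeg_read shells out to ffmpeg to transcode arbitrary
# audio bytes to mono ("-ac 1") raw 32-bit float PCM ("-f f32le") at the
# requested rate, then reinterprets ffmpeg's stdout as a float32 numpy array:
# four bytes per sample, no container or header.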
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = "f32le" , ):
"""simple docstring"""
lowercase_ : List[str] = F"""{sampling_rate}"""
lowercase_ : Any = "1"
if format_for_conversion == "s16le":
lowercase_ : Dict = 2
elif format_for_conversion == "f32le":
lowercase_ : Any = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
lowercase_ : str = platform.system()
if system == "Linux":
lowercase_ : str = "alsa"
lowercase_ : Optional[Any] = "default"
elif system == "Darwin":
lowercase_ : str = "avfoundation"
lowercase_ : Dict = ":0"
elif system == "Windows":
lowercase_ : Tuple = "dshow"
lowercase_ : Any = "default"
lowercase_ : Any = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
lowercase_ : Tuple = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
lowercase_ : List[Any] = _ffmpeg_stream(_UpperCamelCase , _UpperCamelCase )
for item in iterator:
yield item
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "f32le" , ):
"""simple docstring"""
if stream_chunk_s is not None:
lowercase_ : List[str] = stream_chunk_s
else:
lowercase_ : List[Any] = chunk_length_s
lowercase_ : Tuple = ffmpeg_microphone(_UpperCamelCase , _UpperCamelCase , format_for_conversion=_UpperCamelCase )
if format_for_conversion == "s16le":
        lowercase_ : Optional[Any] = np.int16
lowercase_ : int = 2
elif format_for_conversion == "f32le":
        lowercase_ : Optional[int] = np.float32
lowercase_ : Union[str, Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
lowercase_ : int = chunk_length_s / 6
lowercase_ : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_UpperCamelCase , (int, float) ):
lowercase_ : str = [stride_length_s, stride_length_s]
lowercase_ : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
lowercase_ : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
lowercase_ : str = datetime.datetime.now()
lowercase_ : Any = datetime.timedelta(seconds=_UpperCamelCase )
for item in chunk_bytes_iter(_UpperCamelCase , _UpperCamelCase , stride=(stride_left, stride_right) , stream=_UpperCamelCase ):
# Put everything back in numpy scale
lowercase_ : Dict = np.frombuffer(item["raw"] , dtype=_UpperCamelCase )
lowercase_ : Tuple = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
lowercase_ : Dict = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator, chunk_len, stride, stream=False):
    """simple docstring"""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
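# Editor's note, a worked example of the striding above (hypothetical sizes):
# with chunk_len=100 and stride=(20, 10), the accumulator advances by
# chunk_len - stride_left - stride_right = 70 bytes per chunk, so chunk 1
# covers bytes [0, 100) with stride (0, 10) and chunk 2 covers [70, 170) with
# stride (20, 10); consumers drop the strided edges so every byte is decoded
# exactly once.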
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
    lowercase_ : Tuple = 2**24  # 16 MB
try:
with subprocess.Popen(_UpperCamelCase , stdout=subprocess.PIPE , bufsize=_UpperCamelCase ) as ffmpeg_process:
while True:
lowercase_ : str = ffmpeg_process.stdout.read(_UpperCamelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 640
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[Any] = 'gpt_neox_japanese'
def __init__( self : List[str] , a : List[Any]=3_2_0_0_0 , a : Union[str, Any]=2_5_6_0 , a : Optional[Any]=3_2 , a : Any=3_2 , a : str=4 , a : Optional[int]="gelu" , a : Optional[Any]=1.00 , a : Dict=1_0_0_0_0 , a : List[Any]=2_0_4_8 , a : Dict=0.02 , a : int=1e-5 , a : Optional[int]=True , a : Union[str, Any]=3_1_9_9_6 , a : List[Any]=3_1_9_9_9 , a : List[str]=0.1 , a : Dict=0.0 , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(bos_token_id=a , eos_token_id=a , **a )
lowercase_ : int = vocab_size
lowercase_ : int = max_position_embeddings
lowercase_ : List[str] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : List[Any] = intermediate_multiple_size
lowercase_ : List[str] = hidden_act
lowercase_ : Optional[int] = rotary_pct
lowercase_ : Tuple = rotary_emb_base
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = layer_norm_eps
lowercase_ : List[str] = use_cache
lowercase_ : Any = attention_dropout
lowercase_ : List[Any] = hidden_dropout
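# Editor's note: a minimal usage sketch (the public class name is an
# assumption; this file's names are obfuscated):
#   config = GPTNeoXJapaneseConfig(vocab_size=32000, hidden_size=2560)
#   # intermediate_multiple_size (default 4) scales the MLP width relative to
#   # hidden_size rather than storing an absolute intermediate_size.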
| 640
| 1
|
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
UpperCamelCase__ = getLogger(__name__)
UpperCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 8 , _UpperCamelCase = DEFAULT_DEVICE , _UpperCamelCase=False , _UpperCamelCase="summarization" , _UpperCamelCase=None , **_UpperCamelCase , ):
"""simple docstring"""
lowercase_ : Dict = Path(_UpperCamelCase ).open("w" , encoding="utf-8" )
lowercase_ : Any = str(_UpperCamelCase )
lowercase_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(_UpperCamelCase ).to(_UpperCamelCase )
if fpaa:
lowercase_ : List[str] = model.half()
lowercase_ : Tuple = AutoTokenizer.from_pretrained(_UpperCamelCase )
logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
lowercase_ : List[Any] = time.time()
# update config with task specific params
use_task_specific_params(_UpperCamelCase , _UpperCamelCase )
if prefix is None:
lowercase_ : Any = prefix or getattr(model.config , "prefix" , "" ) or ""
for examples_chunk in tqdm(list(chunks(_UpperCamelCase , _UpperCamelCase ) ) ):
lowercase_ : Optional[int] = [prefix + text for text in examples_chunk]
lowercase_ : Optional[Any] = tokenizer(_UpperCamelCase , return_tensors="pt" , truncation=_UpperCamelCase , padding="longest" ).to(_UpperCamelCase )
lowercase_ : Tuple = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_UpperCamelCase , )
lowercase_ : int = tokenizer.batch_decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
for hypothesis in dec:
fout.write(hypothesis + "\n" )
fout.flush()
fout.close()
lowercase_ : List[str] = int(time.time() - start_time ) # seconds
lowercase_ : List[Any] = len(_UpperCamelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=True ):
"""simple docstring"""
lowercase_ : Tuple = argparse.ArgumentParser()
parser.add_argument("model_name" , type=_UpperCamelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("input_path" , type=_UpperCamelCase , help="like cnn_dm/test.source" )
parser.add_argument("save_path" , type=_UpperCamelCase , help="where to save summaries" )
parser.add_argument("--reference_path" , type=_UpperCamelCase , required=_UpperCamelCase , help="like cnn_dm/test.target" )
parser.add_argument("--score_path" , type=_UpperCamelCase , required=_UpperCamelCase , default="metrics.json" , help="where to save metrics" )
parser.add_argument("--device" , type=_UpperCamelCase , required=_UpperCamelCase , default=_UpperCamelCase , help="cuda, cuda:1, cpu etc." )
parser.add_argument(
"--prefix" , type=_UpperCamelCase , required=_UpperCamelCase , default=_UpperCamelCase , help="will be added to the begininng of src examples" )
parser.add_argument("--task" , type=_UpperCamelCase , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=_UpperCamelCase , default=8 , required=_UpperCamelCase , help="batch size" )
parser.add_argument(
"--n_obs" , type=_UpperCamelCase , default=-1 , required=_UpperCamelCase , help="How many observations. Defaults to all." )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--dump-args" , action="store_true" , help="print the custom hparams with the results" )
parser.add_argument(
"--info" , nargs="?" , type=_UpperCamelCase , const=datetime_now() , help=(
"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
" lang=en-ru. If no value is passed, the current datetime string will be used."
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
lowercase_ , lowercase_ : Dict = parser.parse_known_args()
lowercase_ : str = parse_numeric_n_bool_cl_kwargs(_UpperCamelCase )
if parsed_args and verbose:
print(F"""parsed the following generate kwargs: {parsed_args}""" )
lowercase_ : Optional[Any] = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
lowercase_ : str = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_UpperCamelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("Can't mix --fp16 and --device cpu" )
lowercase_ : Union[str, Any] = generate_summaries_or_translations(
        _UpperCamelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fp16 , task=args.task , prefix=args.prefix , **_UpperCamelCase , )
if args.reference_path is None:
return {}
# Compute scores
lowercase_ : Tuple = calculate_bleu if "translation" in args.task else calculate_rouge
lowercase_ : List[str] = [x.rstrip() for x in open(args.save_path ).readlines()]
lowercase_ : Any = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_UpperCamelCase )]
lowercase_ : dict = score_fn(_UpperCamelCase , _UpperCamelCase )
scores.update(_UpperCamelCase )
if args.dump_args:
scores.update(_UpperCamelCase )
if args.info:
lowercase_ : List[str] = args.info
if verbose:
print(_UpperCamelCase )
if args.score_path is not None:
json.dump(_UpperCamelCase , open(args.score_path , "w" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
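    # Usage for summarization (illustrative paths, same layout as the MT example above):
    # python run_eval.py t5-base $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization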
run_generate(verbose=True)
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Optional[Any] , a : Any ):
'''simple docstring'''
lowercase_ : List[Any] = str(id_ )
lowercase_ : List[str] = None
lowercase_ : Tuple = None
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = {} # {vertex:distance}
def __lt__( self : Optional[Any] , a : int ):
'''simple docstring'''
return self.key < other.key
def __repr__( self : Union[str, Any] ):
'''simple docstring'''
return self.id
def lowerCAmelCase__ ( self : Union[str, Any] , a : Optional[int] ):
'''simple docstring'''
self.neighbors.append(a )
def lowerCAmelCase__ ( self : Dict , a : int , a : Optional[int] ):
'''simple docstring'''
lowercase_ : int = weight
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = []
for u in graph:
lowercase_ : List[Any] = math.inf
lowercase_ : str = None
lowercase_ : Tuple = 0
lowercase_ : Tuple = graph[:]
while q:
lowercase_ : List[Any] = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
lowercase_ : Optional[int] = u
lowercase_ : Union[str, Any] = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
for u in graph:
lowercase_ : str = math.inf
lowercase_ : int = None
lowercase_ : List[Any] = 0
lowercase_ : str = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
lowercase_ : List[Any] = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
lowercase_ : str = u
lowercase_ : Optional[int] = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
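# A self-contained sketch of the same lazy, heap-based Prim idea under assumed
# names (adjacency dict of {node: [(weight, neighbor), ...]}); shown only to
# illustrate the algorithm, not part of the original file.
def _prim_sketch(adj, start):
    import heapq

    mst, seen = [], {start}
    heap = [(w, start, v) for w, v in adj[start]]
    heapq.heapify(heap)
    while heap:
        w, u, v = heapq.heappop(heap)
        if v in seen:
            continue  # stale entry: v was already reached via a lighter edge
        seen.add(v)
        mst.append((u, v, w))
        for nw, nv in adj[v]:
            if nv not in seen:
                heapq.heappush(heap, (nw, v, nv))
    return mst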
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class _UpperCAmelCase ( tf.keras.layers.Layer ):
def __init__( self : Optional[int] , a : List[str] , a : Any , a : int , a : List[Any] , a : List[str]=1 , a : Optional[int]=False , **a : Tuple ):
'''simple docstring'''
super().__init__(**a )
lowercase_ : Tuple = vocab_size
lowercase_ : Union[str, Any] = d_embed
lowercase_ : Dict = d_proj
lowercase_ : Any = cutoffs + [vocab_size]
lowercase_ : Optional[int] = [0] + self.cutoffs
lowercase_ : List[str] = div_val
lowercase_ : Any = self.cutoffs[0]
lowercase_ : int = len(self.cutoffs ) - 1
lowercase_ : Any = self.shortlist_size + self.n_clusters
lowercase_ : Union[str, Any] = keep_order
lowercase_ : List[str] = []
lowercase_ : Optional[Any] = []
def lowerCAmelCase__ ( self : str , a : Optional[Any] ):
'''simple docstring'''
if self.n_clusters > 0:
lowercase_ : Optional[Any] = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=a , name="cluster_weight" )
lowercase_ : int = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=a , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
lowercase_ : str = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=a , name=f"""out_projs_._{i}""" , )
self.out_projs.append(a )
else:
self.out_projs.append(a )
lowercase_ : Dict = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=a , name=f"""out_layers_._{i}_._weight""" , )
lowercase_ : str = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=a , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
lowercase_ , lowercase_ : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowercase_ : Optional[int] = self.d_embed // (self.div_val**i)
lowercase_ : Union[str, Any] = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=a , name=f"""out_projs_._{i}""" )
self.out_projs.append(a )
lowercase_ : List[Any] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=a , name=f"""out_layers_._{i}_._weight""" , )
lowercase_ : str = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=a , name=f"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(a )
@staticmethod
def lowerCAmelCase__ ( a : Union[str, Any] , a : Optional[Any] , a : Any , a : Tuple=None ):
'''simple docstring'''
lowercase_ : List[Any] = x
if proj is not None:
lowercase_ : int = tf.einsum("ibd,ed->ibe" , a , a )
return tf.einsum("ibd,nd->ibn" , a , a ) + b
@staticmethod
def lowerCAmelCase__ ( a : Optional[Any] , a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = shape_list(a )
lowercase_ : int = tf.range(lp_size[0] , dtype=target.dtype )
lowercase_ : int = tf.stack([r, target] , 1 )
return tf.gather_nd(a , a )
def lowerCAmelCase__ ( self : Tuple , a : List[str] , a : Any , a : List[Any]=True , a : Dict=False ):
'''simple docstring'''
lowercase_ : Optional[Any] = 0
if self.n_clusters == 0:
lowercase_ : List[Any] = self._logit(a , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
lowercase_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=a , logits=a )
lowercase_ : Tuple = tf.nn.log_softmax(a , axis=-1 )
else:
lowercase_ : Optional[int] = shape_list(a )
lowercase_ : Optional[int] = []
lowercase_ : Union[str, Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
lowercase_ , lowercase_ : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
lowercase_ : Optional[Any] = (target >= l_idx) & (target < r_idx)
lowercase_ : Optional[int] = tf.where(a )
lowercase_ : Dict = tf.boolean_mask(a , a ) - l_idx
if self.div_val == 1:
lowercase_ : Optional[int] = self.out_layers[0][0][l_idx:r_idx]
lowercase_ : Optional[int] = self.out_layers[0][1][l_idx:r_idx]
else:
lowercase_ : Tuple = self.out_layers[i][0]
lowercase_ : str = self.out_layers[i][1]
if i == 0:
lowercase_ : int = tf.concat([cur_W, self.cluster_weight] , 0 )
lowercase_ : Any = tf.concat([cur_b, self.cluster_bias] , 0 )
lowercase_ : Optional[int] = self._logit(a , a , a , self.out_projs[0] )
lowercase_ : str = tf.nn.log_softmax(a )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
lowercase_ : Optional[int] = tf.boolean_mask(a , a )
lowercase_ : int = self._gather_logprob(a , a )
else:
lowercase_ : Tuple = self._logit(a , a , a , self.out_projs[i] )
lowercase_ : str = tf.nn.log_softmax(a )
lowercase_ : int = self.cutoffs[0] + i - 1 # No probability for the head cluster
lowercase_ : List[str] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(a )
if target is not None:
lowercase_ : Any = tf.boolean_mask(a , a )
lowercase_ : Optional[Any] = tf.boolean_mask(a , a )
lowercase_ : str = self._gather_logprob(a , a )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(a , -cur_logprob , shape_list(a ) )
lowercase_ : str = tf.concat(a , axis=-1 )
if target is not None:
if return_mean:
lowercase_ : Optional[int] = tf.reduce_mean(a )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(a )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(a , name=self.name , aggregation="mean" if return_mean else "" )
return out
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Any = False
    while is_sorted is False:  # keep looping until a full pass makes no swaps
lowercase_ : List[str] = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Union[str, Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : Any = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
lowercase_ , lowercase_ : Tuple = input_list[i + 1], input_list[i]
# swapping if elements not in order
lowercase_ : List[Any] = False
return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
UpperCamelCase__ = [int(x) for x in input().split()]
# inputing elements of the list in one line
UpperCamelCase__ = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
'''simple docstring'''
import operator as op
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[Any] = []
    lowercase_ : List[Any] = lambda x , y : int(x / y )  # noqa: E731 integer division operation
lowercase_ : List[str] = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(_UpperCamelCase )) )
for x in post_fix:
        if x.isdigit(): # if x is a digit
stack.append(_UpperCamelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(_UpperCamelCase ) , sep=" | " )
else:
lowercase_ : Dict = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(_UpperCamelCase ) , sep=" | " )
lowercase_ : Optional[int] = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(_UpperCamelCase ) , sep=" | " )
stack.append(
str(opr[x](int(_UpperCamelCase ) , int(_UpperCamelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(_UpperCamelCase ) , sep=" | " , )
return int(stack[0] )
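# Worked example: solve(["5", "6", "9", "*", "+"]) pops 9 and 6 for "*" (54),
# then 54 and 5 for "+", and returns 59.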
if __name__ == "__main__":
UpperCamelCase__ = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Dict = 0
lowercase_ : Optional[Any] = len(_UpperCamelCase ) # No of vertices in graph
lowercase_ : Union[str, Any] = [0] * n
lowercase_ : Optional[int] = [False] * n
    def dfs(at , parent , bridges , id_ ):
lowercase_ : Union[str, Any] = True
lowercase_ : Dict = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
                dfs(to , at , bridges , id_ )
lowercase_ : str = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase_ : Optional[int] = min(low[at] , low[to] )
lowercase_ : list[tuple[int, int]] = []
for i in range(_UpperCamelCase ):
if not visited[i]:
            dfs(i , -1 , bridges , id_ )
return bridges
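# e.g. for the first test graph above (index 0) the bridge set is
# {(2, 3), (2, 5), (3, 4)}; the triangle 0-1-2 and the cycle 5-6-7-8
# contain no bridges.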
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
UpperCamelCase__ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = set()
lowercase_ : str = []
def parse_line(_UpperCamelCase ):
for line in fp:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
lowercase_ : Optional[int] = line.decode("UTF-8" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(" " ):
# process a single warning and move it to `selected_warnings`.
if len(_UpperCamelCase ) > 0:
lowercase_ : List[str] = "\n".join(_UpperCamelCase )
# Only keep the warnings specified in `targets`
if any(F""": {x}: """ in warning for x in targets ):
selected_warnings.add(_UpperCamelCase )
buffer.clear()
continue
else:
lowercase_ : str = line.strip()
buffer.append(_UpperCamelCase )
if from_gh:
for filename in os.listdir(_UpperCamelCase ):
lowercase_ : List[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
if not os.path.isdir(_UpperCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with open(_UpperCamelCase ) as fp:
parse_line(_UpperCamelCase )
else:
try:
with zipfile.ZipFile(_UpperCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_UpperCamelCase ):
# read the file
if filename != "warnings.txt":
continue
with z.open(_UpperCamelCase ) as fp:
parse_line(_UpperCamelCase )
except Exception:
logger.warning(
F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
return selected_warnings
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = set()
lowercase_ : Dict = [os.path.join(_UpperCamelCase , _UpperCamelCase ) for p in os.listdir(_UpperCamelCase ) if (p.endswith(".zip" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(_UpperCamelCase , _UpperCamelCase ) )
return selected_warnings
if __name__ == "__main__":
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return values.split("," )
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCamelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCamelCase__ = extract_warnings(args.output_dir, args.targets)
UpperCamelCase__ = sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase__ = 'scheduler_config.json'
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: int = 1
__lowerCamelCase: List[Any] = 2
__lowerCamelCase: Optional[Any] = 3
__lowerCamelCase: int = 4
__lowerCamelCase: Optional[int] = 5
@dataclass
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: jnp.ndarray
class _UpperCAmelCase :
__lowerCamelCase: List[str] = SCHEDULER_CONFIG_NAME
__lowerCamelCase: Optional[int] = ['dtype']
__lowerCamelCase: int = []
__lowerCamelCase: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Dict[str, Any] = None , a : Optional[str] = None , a : Union[str, Any]=False , **a : Union[str, Any] , ):
'''simple docstring'''
lowercase_ , lowercase_ : Any = cls.load_config(
pretrained_model_name_or_path=a , subfolder=a , return_unused_kwargs=a , **a , )
lowercase_ , lowercase_ : Union[str, Any] = cls.from_config(a , return_unused_kwargs=a , **a )
if hasattr(a , "create_state" ) and getattr(a , "has_state" , a ):
lowercase_ : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def lowerCAmelCase__ ( self : int , a : Union[str, os.PathLike] , a : bool = False , **a : int ):
'''simple docstring'''
self.save_config(save_directory=a , push_to_hub=a , **a )
@property
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ : str = list(set([cls.__name__] + cls._compatibles ) )
lowercase_ : str = importlib.import_module(__name__.split("." )[0] )
lowercase_ : Optional[Any] = [
getattr(a , a ) for c in compatible_classes_str if hasattr(a , a )
]
return compatible_classes
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
assert len(_UpperCamelCase ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(_UpperCamelCase ) - x.ndim) ) , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=0.999 , _UpperCamelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCamelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
lowercase_ : int = []
for i in range(_UpperCamelCase ):
lowercase_ : Union[str, Any] = i / num_diffusion_timesteps
lowercase_ : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCamelCase ) / alpha_bar(_UpperCamelCase ) , _UpperCamelCase ) )
return jnp.array(_UpperCamelCase , dtype=_UpperCamelCase )
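# Shape/behavior note (a sketch, not from the original): for
# num_diffusion_timesteps=1000 the returned array has shape (1000,); the betas
# grow with i because alpha_bar(t2) / alpha_bar(t1) shrinks as the cosine
# decays, and each beta is clipped at max_beta (0.999 by default).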
@flax.struct.dataclass
class _UpperCAmelCase :
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
__lowerCamelCase: jnp.ndarray
@classmethod
def lowerCAmelCase__ ( cls : Tuple , a : Optional[int] ):
'''simple docstring'''
lowercase_ : Any = scheduler.config
if config.trained_betas is not None:
lowercase_ : Union[str, Any] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase_ : List[Any] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : Tuple = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Union[str, Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
lowercase_ : str = 1.0 - betas
lowercase_ : Dict = jnp.cumprod(a , axis=0 )
return cls(
alphas=a , betas=a , alphas_cumprod=a , )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : List[str] = state.alphas_cumprod
lowercase_ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5
lowercase_ : int = sqrt_alpha_prod.flatten()
lowercase_ : Union[str, Any] = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
lowercase_ : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
lowercase_ : Union[str, Any] = sqrt_one_minus_alpha_prod.flatten()
lowercase_ : Any = broadcast_to_shape_from_left(_UpperCamelCase , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ , lowercase_ : int = get_sqrt_alpha_prod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
try:
lowercase_ : List[str] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
lowercase_ : Union[str, Any] = default
else:
# KEY is set, convert it to True or False.
try:
lowercase_ : Tuple = strtobool(_UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
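# e.g. with RUN_SLOW=yes in the environment, parse_flag_from_env("RUN_SLOW")
# returns 1 (strtobool maps yes/true/1 to 1 and no/false/0 to 0).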
UpperCamelCase__ = parse_flag_from_env('RUN_SLOW', default=False)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skip("Test was skipped" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , "test is slow" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase=None , _UpperCamelCase=None ):
"""simple docstring"""
if test_case is None:
return partial(_UpperCamelCase , version=_UpperCamelCase )
return unittest.skipUnless(is_torch_version(">=" , _UpperCamelCase ) , F"""test requires torch version >= {version}""" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(_UpperCamelCase )
UpperCamelCase__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_UpperCamelCase )
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: Union[str, Any] = True
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ):
'''simple docstring'''
lowercase_ : List[Any] = tempfile.mkdtemp()
@classmethod
def lowerCAmelCase__ ( cls : List[str] ):
'''simple docstring'''
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowerCAmelCase__ ( self : Any ):
'''simple docstring'''
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(a )
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCAmelCase__ ( self : List[Any] , a : Union[mock.Mock, List[mock.Mock]] ):
'''simple docstring'''
lowercase_ : int = mocks if isinstance(a , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Optional[Any] = AcceleratorState()
lowercase_ : List[Any] = tensor[None].clone().to(state.device )
lowercase_ : Any = gather(_UpperCamelCase ).cpu()
lowercase_ : Union[str, Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _UpperCamelCase ):
return False
return True
class _UpperCAmelCase :
def __init__( self : List[Any] , a : str , a : Any , a : List[Any] ):
'''simple docstring'''
lowercase_ : Dict = returncode
lowercase_ : str = stdout
lowercase_ : List[Any] = stderr
async def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
while True:
lowercase_ : Optional[int] = await stream.readline()
if line:
callback(_UpperCamelCase )
else:
break
async def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False ):
"""simple docstring"""
if echo:
print("\nRunning: " , " ".join(_UpperCamelCase ) )
lowercase_ : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
lowercase_ : Tuple = []
lowercase_ : Any = []
def tee(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="" ):
lowercase_ : List[Any] = line.decode("utf-8" ).rstrip()
sink.append(_UpperCamelCase )
if not quiet:
print(_UpperCamelCase , _UpperCamelCase , file=_UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stderr , label="stderr:" ) ) ),
] , timeout=_UpperCamelCase , )
return _RunOutput(await p.wait() , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=180 , _UpperCamelCase=False , _UpperCamelCase=True ):
"""simple docstring"""
lowercase_ : Dict = asyncio.get_event_loop()
lowercase_ : List[str] = loop.run_until_complete(
_stream_subprocess(_UpperCamelCase , env=_UpperCamelCase , stdin=_UpperCamelCase , timeout=_UpperCamelCase , quiet=_UpperCamelCase , echo=_UpperCamelCase ) )
lowercase_ : Optional[int] = " ".join(_UpperCamelCase )
if result.returncode > 0:
lowercase_ : str = "\n".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
return result
class _UpperCAmelCase ( snake_case ):
pass
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase=False ):
"""simple docstring"""
try:
lowercase_ : List[Any] = subprocess.check_output(_UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_UpperCamelCase , "decode" ):
lowercase_ : List[str] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"""Command `{" ".join(_UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
'''simple docstring'''
import heapq
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
lowercase_ : list[list] = []
    # for each node and its adjacency list, add them and the node's rank to the queue
    # using the heapq module, the queue is filled like a priority queue
    # heapq implements a min-priority queue, so -1 * len(v) is used to simulate a max-priority queue
for key, value in graph.items():
# O(log(n))
heapq.heappush(_UpperCamelCase , [-1 * len(_UpperCamelCase ), (key, value)] )
# chosen_vertices = set of chosen vertices
lowercase_ : Optional[Any] = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
lowercase_ : Any = heapq.heappop(_UpperCamelCase )[1][0]
chosen_vertices.add(_UpperCamelCase )
# Remove all arcs adjacent to argmax
for elem in queue:
            # if v has no adjacent nodes, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
            # remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
lowercase_ : str = elem[1][1].index(_UpperCamelCase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_UpperCamelCase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
UpperCamelCase__ = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCamelCase__ = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCamelCase__ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
UpperCamelCase__ = OrderedDict(
[
        # Model for Image Classification mapping
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
UpperCamelCase__ = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
UpperCamelCase__ = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
UpperCamelCase__ = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
UpperCamelCase__ = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
UpperCamelCase__ = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
UpperCamelCase__ = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
UpperCamelCase__ = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
UpperCamelCase__ = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
UpperCamelCase__ = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
UpperCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCamelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCamelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCamelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCamelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCamelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCamelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCamelCase__ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: Any = FLAX_MODEL_MAPPING
UpperCamelCase__ = auto_class_update(FlaxAutoModel)
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: List[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCamelCase__ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: Any = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase__ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCamelCase__ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase__ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: Union[str, Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase__ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: Union[str, Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCamelCase__ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase__ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: List[str] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCamelCase__ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: Tuple = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCamelCase__ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: str = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase__ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: List[str] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCamelCase__ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class _UpperCAmelCase ( _BaseAutoModelClass ):
__lowerCamelCase: Tuple = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCamelCase__ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
'''simple docstring'''
UpperCamelCase__ = 256
# Modulus to hash a string
UpperCamelCase__ = 1000003
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ : Tuple = len(_UpperCamelCase )
lowercase_ : Tuple = len(_UpperCamelCase )
if p_len > t_len:
return False
lowercase_ : str = 0
lowercase_ : List[str] = 0
lowercase_ : Union[str, Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(_UpperCamelCase ):
lowercase_ : Dict = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowercase_ : Union[str, Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowercase_ : Union[str, Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
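        # drop the leading char's contribution, shift the window by one power of the alphabet size, then add the trailing char (mod M)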
lowercase_ : str = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowercase_ : int = "abc1abc12"
lowercase_ : Any = "alskfjaldsabc1abc1abc12k23adsfabcabc"
lowercase_ : Dict = "alskfjaldsk23adsfabcabc"
assert rabin_karp(_UpperCamelCase , _UpperCamelCase ) and not rabin_karp(_UpperCamelCase , _UpperCamelCase )
# Test 2)
lowercase_ : Tuple = "ABABX"
lowercase_ : Optional[int] = "ABABZABABYABABX"
assert rabin_karp(_UpperCamelCase , _UpperCamelCase )
# Test 3)
lowercase_ : Optional[int] = "AAAB"
lowercase_ : Any = "ABAAAAAB"
assert rabin_karp(_UpperCamelCase , _UpperCamelCase )
# Test 4)
lowercase_ : Union[str, Any] = "abcdabcy"
lowercase_ : int = "abcxabcdabxabcdabcdabcy"
assert rabin_karp(_UpperCamelCase , _UpperCamelCase )
# Test 5)
lowercase_ : str = "Lü"
lowercase_ : Tuple = "Lüsai"
assert rabin_karp(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Any = "Lue"
assert not rabin_karp(_UpperCamelCase , _UpperCamelCase )
print("Success." )
if __name__ == "__main__":
test_rabin_karp()
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCamelCase__ = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
lowercase_ : Tuple = XLNetConfig.from_json_file(_UpperCamelCase )
lowercase_ : Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
lowercase_ : Dict = finetuning_task
lowercase_ : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
lowercase_ : Any = XLNetForSequenceClassification(_UpperCamelCase )
elif "squad" in finetuning_task:
lowercase_ : Optional[int] = finetuning_task
lowercase_ : Optional[int] = XLNetForQuestionAnswering(_UpperCamelCase )
else:
lowercase_ : Union[str, Any] = XLNetLMHeadModel(_UpperCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
lowercase_ : Optional[Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Dict = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F"""Save PyTorch model to {os.path.abspath(_UpperCamelCase )}""" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"""Save configuration file to {os.path.abspath(_UpperCamelCase )}""" )
with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: List[str] = 'convbert'
def __init__( self : Optional[Any] , a : Tuple=3_0_5_2_2 , a : Dict=7_6_8 , a : Any=1_2 , a : List[Any]=1_2 , a : Any=3_0_7_2 , a : Optional[Any]="gelu" , a : int=0.1 , a : Optional[int]=0.1 , a : Tuple=5_1_2 , a : int=2 , a : str=0.02 , a : Union[str, Any]=1e-12 , a : Tuple=1 , a : Tuple=0 , a : Any=2 , a : Any=7_6_8 , a : Tuple=2 , a : Any=9 , a : str=1 , a : str=None , **a : List[str] , ):
'''simple docstring'''
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , **a , )
lowercase_ : Any = vocab_size
lowercase_ : Any = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : List[Any] = intermediate_size
lowercase_ : Dict = hidden_act
lowercase_ : int = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : Optional[int] = max_position_embeddings
lowercase_ : Dict = type_vocab_size
lowercase_ : int = initializer_range
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : Dict = embedding_size
lowercase_ : int = head_ratio
lowercase_ : Tuple = conv_kernel_size
lowercase_ : Tuple = num_groups
lowercase_ : int = classifier_dropout
class _UpperCAmelCase ( snake_case ):
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase_ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase_ : List[str] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
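# Hedged usage sketch: upstream these two classes are ConvBertConfig and
# ConvBertOnnxConfig (both obfuscated to _UpperCAmelCase above), so a typical
# instantiation would look like:
#   config = ConvBertConfig(hidden_size=768, num_attention_heads=12, conv_kernel_size=9)
#   onnx_config = ConvBertOnnxConfig(config, task="multiple-choice")
#   onnx_config.inputs  # OrderedDict with batch/choice/sequence dynamic axes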
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
"""simple docstring"""
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
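# Hedged usage sketch: a positive power of two has exactly one set bit, so
# n & (n - 1) clears it and the expression evaluates to 0. Edge case: since
# 0 & -1 == 0, this predicate also reports True for 0.
assert __SCREAMING_SNAKE_CASE(1)  # 2**0
assert __SCREAMING_SNAKE_CASE(1_024)  # 2**10
assert not __SCREAMING_SNAKE_CASE(6)  # 0b110 has two set bits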
| 640
| 1
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if len(_UpperCamelCase ) != len(_UpperCamelCase ):
raise ValueError("String lengths must match!" )
lowercase_ : Optional[Any] = 0
for char_a, char_b in zip(_UpperCamelCase , _UpperCamelCase ):
if char_a != char_b:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
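# Hedged usage sketch: the Hamming distance counts the positions at which two
# equal-length strings differ.
assert __SCREAMING_SNAKE_CASE("karolin", "kathrin") == 3  # r/t, o/h, l/r differ
assert __SCREAMING_SNAKE_CASE("abc", "abc") == 0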
| 640
|
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
UpperCamelCase__ = 50003
UpperCamelCase__ = 50002
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( snake_case , unittest.TestCase ):
__lowerCamelCase: Optional[int] = PLBartTokenizer
__lowerCamelCase: Any = None
__lowerCamelCase: Dict = False
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : Any = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[str] = PLBartTokenizer(a , language_codes="base" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : str = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 4 , a )]
self.assertListEqual(a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
lowercase_ : int = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : str = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : str = PLBartTokenizer(a , language_codes="multi" , keep_accents=a )
lowercase_ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowercase_ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ : Optional[Any] = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
lowercase_ : List[str] = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowercase_ : Dict = tokenizer.vocab_size
lowercase_ : Union[str, Any] = [tokenizer.convert_ids_to_tokens(a ) for x in range(end - 7 , a )]
self.assertListEqual(
a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowercase_ : Any = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowercase_ : List[Any] = tokenizer(a ).input_ids
self.assertEqual(
tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a ) , a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
__lowerCamelCase: int = 'uclanlp/plbart-python-en_XX'
__lowerCamelCase: Tuple = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__lowerCamelCase: List[str] = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__lowerCamelCase: List[str] = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase__ ( cls : str ):
'''simple docstring'''
lowercase_ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
lowercase_ : List[str] = 1
return cls
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , a )
def lowerCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertIn(a , self.tokenizer.all_special_ids )
lowercase_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
lowercase_ : Optional[int] = self.tokenizer.decode(a , skip_special_tokens=a )
lowercase_ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=a )
self.assertEqual(a , a )
self.assertNotIn(self.tokenizer.eos_token , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0]
self.assertIsInstance(src_text[0] , a )
lowercase_ : Tuple = 1_0
lowercase_ : int = self.tokenizer(a , max_length=a , truncation=a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , a )
self.assertEqual(len(a ) , a )
def lowerCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ : Optional[int] = tempfile.mkdtemp()
lowercase_ : List[str] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a )
lowercase_ : Tuple = PLBartTokenizer.from_pretrained(a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , a )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=a , return_tensors="pt" )
lowercase_ : List[str] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=a , truncation=a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowercase_ : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(a , a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
lowercase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer(self.src_text , padding=a , truncation=a , max_length=3 , return_tensors="pt" )
lowercase_ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=a , truncation=a , max_length=1_0 , return_tensors="pt" )
lowercase_ : Dict = targets["input_ids"]
lowercase_ : str = shift_tokens_right(a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
| 640
| 1
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase="ro" , _UpperCamelCase="en" , _UpperCamelCase="wmt16" , _UpperCamelCase=None ):
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
lowercase_ : Dict = F"""{src_lang}-{tgt_lang}"""
print(F"""Converting {dataset}-{pair}""" )
lowercase_ : int = datasets.load_dataset(_UpperCamelCase , _UpperCamelCase )
if save_dir is None:
lowercase_ : Optional[int] = F"""{dataset}-{pair}"""
lowercase_ : Optional[Any] = Path(_UpperCamelCase )
save_dir.mkdir(exist_ok=_UpperCamelCase )
for split in ds.keys():
print(F"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
lowercase_ : Any = "val" if split == "validation" else split
lowercase_ : Tuple = save_dir.joinpath(F"""{fn}.source""" )
lowercase_ : Dict = save_dir.joinpath(F"""{fn}.target""" )
lowercase_ : int = src_path.open("w+" )
lowercase_ : List[str] = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
lowercase_ : int = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
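# Hedged CLI sketch (the module filename is hypothetical; fire exposes the
# function's keyword arguments as command-line flags):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en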
| 640
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 640
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 640
|
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'linear'
__lowerCamelCase: Any = 'cosine'
__lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
__lowerCamelCase: Tuple = 'polynomial'
__lowerCamelCase: int = 'constant'
__lowerCamelCase: Optional[Any] = 'constant_with_warmup'
__lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Dict = step_rules.split("," )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ : Any = rule_str.split(":" )
lowercase_ : List[Any] = int(_UpperCamelCase )
lowercase_ : int = float(_UpperCamelCase )
lowercase_ : Optional[int] = value
lowercase_ : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase , _UpperCamelCase ):
def rule_func(_UpperCamelCase ) -> float:
lowercase_ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(sorted_steps ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowercase_ : Optional[int] = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
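# Hedged note on the `step_rules` string this parser expects: in the upstream
# implementation it is comma-separated "multiplier:step" pairs followed by a
# trailing bare multiplier, e.g. "1:10,0.1:20,0.01" keeps the LR multiplier at
# 1.0 until step 10, at 0.1 until step 20, and at 0.01 thereafter.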
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
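# Hedged usage sketch of the dispatcher above (deobfuscated it is `get_scheduler`;
# the tiny model and optimizer below are hypothetical placeholders):
#   import torch
#   optimizer = torch.optim.AdamW(torch.nn.Linear(4, 2).parameters(), lr=1e-3)
#   scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       optimizer.step()
#       scheduler.step()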
| 640
| 1
|
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
},
'merges_file': {
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
},
'tokenizer_file': {
'Salesforce/codegen-350M-mono': (
'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase__ = {
'Salesforce/codegen-350M-mono': 2048,
}
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase: Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase: List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase: Tuple = ['input_ids', 'attention_mask']
__lowerCamelCase: Tuple = CodeGenTokenizer
def __init__( self : Dict , a : Optional[Any]=None , a : Any=None , a : List[Any]=None , a : int="<|endoftext|>" , a : Optional[Any]="<|endoftext|>" , a : Union[str, Any]="<|endoftext|>" , a : Any=False , **a : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
a , a , tokenizer_file=a , unk_token=a , bos_token=a , eos_token=a , add_prefix_space=a , **a , )
if kwargs.pop("add_bos_token" , a ):
lowercase_ : int = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
lowercase_ : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , a ) != add_prefix_space:
lowercase_ : List[Any] = getattr(a , pre_tok_state.pop("type" ) )
lowercase_ : Any = add_prefix_space
lowercase_ : List[Any] = pre_tok_class(**a )
lowercase_ : int = add_prefix_space
def lowerCAmelCase__ ( self : Tuple , *a : List[str] , **a : List[Any] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = kwargs.get("is_split_into_words" , a )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*a , **a )
def lowerCAmelCase__ ( self : Dict , *a : Optional[Any] , **a : Union[str, Any] ):
'''simple docstring'''
lowercase_ : Dict = kwargs.get("is_split_into_words" , a )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*a , **a )
def lowerCAmelCase__ ( self : Optional[Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowercase_ : Optional[Any] = self._tokenizer.model.save(a , name=a )
return tuple(a )
def lowerCAmelCase__ ( self : Optional[int] , a : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , a : bool = False , a : bool = None , a : Optional[List[str]] = None , **a : Optional[Any] , ):
'''simple docstring'''
lowercase_ : Optional[Any] = super().decode(
token_ids=a , skip_special_tokens=a , clean_up_tokenization_spaces=a , **a , )
if truncate_before_pattern is not None and len(a ) > 0:
lowercase_ : Optional[Any] = self.truncate(a , a )
return decoded_text
def lowerCAmelCase__ ( self : Optional[int] , a : str , a : List[Any] ):
'''simple docstring'''
def find_re(a : Tuple , a : List[str] , a : Optional[int] ):
lowercase_ : List[Any] = pattern.search(a , a )
return m.start() if m else -1
lowercase_ : Tuple = [re.compile(a , re.MULTILINE ) for pattern in truncate_before_pattern]
lowercase_ : int = list(re.finditer("^print" , a , re.MULTILINE ) )
if len(a ) > 1:
lowercase_ : Dict = completion[: prints[1].start()]
lowercase_ : Union[str, Any] = list(re.finditer("^def" , a , re.MULTILINE ) )
if len(a ) > 1:
lowercase_ : Optional[Any] = completion[: defs[1].start()]
lowercase_ : Optional[int] = 0
lowercase_ : Union[str, Any] = [
pos for pos in [find_re(a , a , a ) for terminal in terminals] if pos != -1
]
if len(a ) > 0:
return completion[: min(a )]
else:
return completion
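# Hedged usage sketch (upstream this class is CodeGenTokenizerFast; the regex
# list mirrors the truncation patterns suggested in the upstream docs):
#   tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tok("def hello():\n    print('hi')").input_ids
#   text = tok.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])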
| 640
|
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase = 200 ):
"""simple docstring"""
lowercase_ : Optional[int] = [1, 2, 5, 10, 20, 50, 100, 200]
lowercase_ : str = [0] * (pence + 1)
lowercase_ : Dict = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(_UpperCamelCase , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
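# Hedged worked check: for 5 pence with coins {1, 2, 5} there are 4 ways
# (5, 2+2+1, 2+1+1+1, 1+1+1+1+1), confirming the bottom-up DP above.
assert solution(5) == 4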
| 640
| 1
|