code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A_ : Dict = logging.getLogger(__name__)
A_ : List[str] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
A_ : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a_ :
'''simple docstring'''
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCAmelCase__ )} , )
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class a_ :
'''simple docstring'''
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'The input training data file (a text file).'} )
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={
'help': (
'The input training data files (multiple files in glob format). '
'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
)
} , )
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
lowerCamelCase__ : Optional[str] = field(
default=lowerCAmelCase__ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
lowerCamelCase__ : bool = field(
default=lowerCAmelCase__ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
lowerCamelCase__ : bool = field(
default=lowerCAmelCase__ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
lowerCamelCase__ : bool = field(default=lowerCAmelCase__ , metadata={'help': 'Whether ot not to use whole word mask.'} )
lowerCamelCase__ : float = field(
default=0.1_5 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
lowerCamelCase__ : float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
lowerCamelCase__ : int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
lowerCamelCase__ : int = field(
default=-1 , metadata={
'help': (
'Optional input sequence length after tokenization.'
'The training dataset will be truncated in block of this size for training.'
'Default to the model max input length for single sentence inputs (take into account special tokens).'
)
} , )
lowerCamelCase__ : bool = field(
default=lowerCAmelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = None , ):
def _dataset(_lowerCamelCase , _lowerCamelCase=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=_A , file_path=_A , block_size=args.block_size , ref_path=_A , )
return LineByLineTextDataset(tokenizer=_A , file_path=_A , block_size=args.block_size )
else:
return TextDataset(
tokenizer=_A , file_path=_A , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_A , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(_A ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Any = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowerCamelCase__ : int = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCamelCase__ : List[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
lowerCamelCase__ : Optional[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it,and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
lowerCamelCase__ : Union[str, Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
lowerCamelCase__ : List[str] = AutoModelWithLMHead.from_config(_A )
model.resize_token_embeddings(len(_A ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
lowerCamelCase__ : Union[str, Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowerCamelCase__ : int = min(data_args.block_size , tokenizer.max_len )
# Get datasets
lowerCamelCase__ : Union[str, Any] = (
get_dataset(_A , tokenizer=_A , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowerCamelCase__ : Any = (
get_dataset(_A , tokenizer=_A , evaluate=_A , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowerCamelCase__ : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=_A , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowerCamelCase__ : Optional[Any] = DataCollatorForWholeWordMask(
tokenizer=_A , mlm_probability=data_args.mlm_probability )
else:
lowerCamelCase__ : Tuple = DataCollatorForLanguageModeling(
tokenizer=_A , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCamelCase__ : Dict = Trainer(
model=_A , args=_A , data_collator=_A , train_dataset=_A , eval_dataset=_A , prediction_loss_only=_A , )
# Training
if training_args.do_train:
lowerCamelCase__ : Dict = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=_A )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase__ : int = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
lowerCamelCase__ : Union[str, Any] = trainer.evaluate()
lowerCamelCase__ : Dict = math.exp(eval_output['eval_loss'] )
lowerCamelCase__ : int = {'perplexity': perplexity}
lowerCamelCase__ : Tuple = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(_A , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , _A , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(_A )
return results
def lowerCamelCase_ ( _lowerCamelCase ):
main()
if __name__ == "__main__":
main()
| 355
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
lowerCamelCase__ : Tuple = 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 316
| 0
|
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
A_ : Dict = False
try:
A_ : Any = _is_package_available("google.colab")
except ModuleNotFoundError:
pass
@input.register
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ = None, lowerCamelCase_ = [] ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : List[str] = choices
lowerCamelCase__ : List[Any] = prompt
if sys.platform == "win32":
lowerCamelCase__ : Optional[int] = '*'
else:
lowerCamelCase__ : Union[str, Any] = '➔ '
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "" ):
'''simple docstring'''
if sys.platform != "win32":
writeColor(self.choices[index], 3_2, lowerCamelCase_ )
else:
forceWrite(self.choices[index], lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if index == self.position:
forceWrite(f''' {self.arrow_char} ''' )
self.write_choice(lowerCamelCase_ )
else:
forceWrite(f''' {self.choices[index]}''' )
reset_cursor()
def a__ (self, lowerCamelCase_, lowerCamelCase_ = 1 ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(lowerCamelCase_ )
move_cursor(lowerCamelCase_, direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def a__ (self ):
'''simple docstring'''
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def a__ (self ):
'''simple docstring'''
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def a__ (self ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position, 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def a__ (self ):
'''simple docstring'''
move_cursor(len(self.choices ) - self.position, 'DOWN' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(lowerCamelCase_ )] for number in range(1_0 )] )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = int(chr(self.current_selection ) )
lowerCamelCase__ : Optional[Any] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP, -movement )
elif self.position < index:
self.move_direction(Direction.DOWN, lowerCamelCase_ )
else:
return
else:
return
def a__ (self, lowerCamelCase_ = 0 ):
'''simple docstring'''
if self.prompt:
linebreak()
forceWrite(self.prompt, '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter', '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n' )
lowerCamelCase__ : List[str] = default_choice
for i in range(len(self.choices ) ):
self.print_choice(lowerCamelCase_ )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position, 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
lowerCamelCase__ : Tuple = int(builtins.input() )
except ValueError:
lowerCamelCase__ : List[str] = default_choice
else:
lowerCamelCase__ : Optional[int] = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1, 'UP' )
clear_line()
self.write_choice(lowerCamelCase_, '\n' )
return choice
| 356
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : Dict = [None] * new_size
lowerCamelCase__ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
| 316
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a_ ( __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = "naver-clova-ix/donut-base-finetuned-docvqa"
lowerCamelCase__ : Optional[int] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
lowerCamelCase__ : Tuple = "document_qa"
lowerCamelCase__ : Optional[int] = AutoProcessor
lowerCamelCase__ : List[str] = VisionEncoderDecoderModel
lowerCamelCase__ : Union[str, Any] = ["image", "text"]
lowerCamelCase__ : List[Any] = ["text"]
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*lowerCamelCase__, **lowerCamelCase__ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
lowerCamelCase__ : str = task_prompt.replace('{user_input}', lowerCamelCase__ )
lowerCamelCase__ : Any = self.pre_processor.tokenizer(
lowerCamelCase__, add_special_tokens=lowerCamelCase__, return_tensors='pt' ).input_ids
lowerCamelCase__ : List[Any] = self.pre_processor(lowerCamelCase__, return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.model.generate(
inputs['pixel_values'].to(self.device ), decoder_input_ids=inputs['decoder_input_ids'].to(self.device ), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=lowerCamelCase__, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=lowerCamelCase__, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=lowerCamelCase__, ).sequences
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.pre_processor.batch_decode(lowerCamelCase__ )[0]
lowerCamelCase__ : Dict = sequence.replace(self.pre_processor.tokenizer.eos_token, '' )
lowerCamelCase__ : Dict = sequence.replace(self.pre_processor.tokenizer.pad_token, '' )
lowerCamelCase__ : Dict = re.sub(r'<.*?>', '', lowerCamelCase__, count=1 ).strip() # remove first task start token
lowerCamelCase__ : Tuple = self.pre_processor.tokenajson(lowerCamelCase__ )
return sequence["answer"]
| 357
|
"""simple docstring"""
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = 1
while len(_lowerCamelCase ) < 1e6:
constant.append(str(_lowerCamelCase ) )
i += 1
lowerCamelCase__ : str = ''.join(_lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 316
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Dict = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ , lowerCamelCase__ : List[str] = emb.weight.shape
lowerCamelCase__ : Optional[int] = nn.Linear(A__ , A__ , bias=A__ )
lowerCamelCase__ : List[str] = emb.weight.data
return lin_layer
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=None ):
lowerCamelCase__ : List[str] = {}
for old_key in state_dict.keys():
lowerCamelCase__ : List[Any] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowerCamelCase__ : Optional[int] = key.replace('moe_layer.experts.0' , f'''ffn.experts.expert_{expert_idx}''' )
else:
lowerCamelCase__ : Union[str, Any] = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
lowerCamelCase__ : str = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
lowerCamelCase__ : List[Any] = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
lowerCamelCase__ : int = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
lowerCamelCase__ : Tuple = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
lowerCamelCase__ : Optional[int] = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
lowerCamelCase__ : Tuple = key.replace('final_layer_norm' , 'ff_layer_norm' )
lowerCamelCase__ : Tuple = state_dict[old_key]
return new_dict
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = WEIGHTS_NAME ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = 0
os.makedirs(A__ , exist_ok=A__ )
for expert in range(A__ ):
lowerCamelCase__ : str = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(A__ ):
lowerCamelCase__ : Dict = torch.load(A__ )['model']
remove_ignore_keys_(A__ )
lowerCamelCase__ : Tuple = rename_fairseq_keys(A__ , A__ )
lowerCamelCase__ : List[Any] = os.path.join(
A__ , weights_name.replace('.bin' , f'''-{len(A__ )+1:05d}-of-???.bin''' ) )
torch.save(A__ , A__ )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(A__ )[0]].dtype )
# Add the last block
lowerCamelCase__ : Union[str, Any] = os.path.join(A__ , weights_name.replace('.bin' , f'''-{len(A__ )+1:05d}-of-???.bin''' ) )
lowerCamelCase__ : Optional[Any] = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(A__ )
lowerCamelCase__ : str = rename_fairseq_keys(A__ , A__ )
lowerCamelCase__ : Any = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(A__ ) == 1:
lowerCamelCase__ : int = os.path.join(A__ , A__ )
torch.save(A__ , A__ )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(A__ , A__ )
# Otherwise, let's build the index
lowerCamelCase__ : List[str] = {}
for idx, shard in enumerate(A__ ):
lowerCamelCase__ : Any = weights_name.replace('.bin' , f'''-{idx+1:05d}-of-{len(A__ ):05d}.bin''' )
lowerCamelCase__ : List[Any] = os.path.join(A__ , weights_name.replace('.bin' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(A__ , os.path.join(A__ , A__ ) )
for key in shard:
lowerCamelCase__ : str = shard_file
# Add the metadata
lowerCamelCase__ : Optional[int] = {'total_size': total_size}
lowerCamelCase__ : str = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(A__ , A__ ) , 'w' , encoding='utf-8' ) as f:
lowerCamelCase__ : Dict = json.dumps(A__ , indent=2 , sort_keys=A__ ) + '\n'
f.write(A__ )
return metadata, index
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
A_ : List[str] = parser.parse_args()
A_, A_ : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_28,
args.dtype,
)
A_ : List[str] = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
)
config.save_pretrained(args.pytorch_dump_folder_path)
A_ : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 358
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 316
| 0
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = pipeline(
task='zero-shot-audio-classification', model='hf-internal-testing/tiny-clap-htsat-unfused' )
lowerCamelCase__ : List[str] = load_dataset('ashraq/esc50' )
lowerCamelCase__ : str = dataset['train']['audio'][-1]['array']
lowerCamelCase__ : int = audio_classifier(lowerCamelCase_, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}], )
@unittest.skip('No models are available in TF' )
def a__ (self ):
'''simple docstring'''
pass
@slow
@require_torch
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = pipeline(
task='zero-shot-audio-classification', model='laion/clap-htsat-unfused', )
# This is an audio of a dog
lowerCamelCase__ : Tuple = load_dataset('ashraq/esc50' )
lowerCamelCase__ : Tuple = dataset['train']['audio'][-1]['array']
lowerCamelCase__ : int = audio_classifier(lowerCamelCase_, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
], )
lowerCamelCase__ : Optional[Any] = audio_classifier([audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5, )
lowerCamelCase__ : int = audio_classifier(
[audio] * 5, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'], batch_size=5 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ), [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5, )
@unittest.skip('No models are available in TF' )
def a__ (self ):
'''simple docstring'''
pass
| 359
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = v.to_dict()
return d
| 316
| 0
|
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = math.inf , _lowerCamelCase = -math.inf , _lowerCamelCase = math.inf , _lowerCamelCase = -math.inf , _lowerCamelCase = False , _lowerCamelCase = 100 , _lowerCamelCase = 0.01 , _lowerCamelCase = 1 , ):
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = search_prob
lowerCamelCase__ : Any = start_temperate
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : Tuple = None
while not search_end:
lowerCamelCase__ : Dict = current_state.score()
if best_state is None or current_score > best_state.score():
lowerCamelCase__ : Union[str, Any] = current_state
scores.append(a__ )
iterations += 1
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : Tuple = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
lowerCamelCase__ : int = random.randint(0 , len(a__ ) - 1 ) # picking a random neighbor
lowerCamelCase__ : Optional[Any] = neighbors.pop(a__ )
lowerCamelCase__ : int = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
lowerCamelCase__ : Tuple = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
lowerCamelCase__ : Tuple = picked_neighbor
else:
lowerCamelCase__ : List[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
lowerCamelCase__ : Union[str, Any] = picked_neighbor
lowerCamelCase__ : List[Any] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
lowerCamelCase__ : Union[str, Any] = True
else:
lowerCamelCase__ : List[Any] = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(a__ ) , a__ )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
A_ : Tuple = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ : Tuple = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
A_ : Union[str, Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
A_ : List[Any] = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return (3 * x**2) - (6 * y)
A_ : int = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ : List[str] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f"{local_min.score()}"
)
A_ : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
A_ : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f"{local_min.score()}"
)
| 360
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 316
| 0
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase_ ( _lowerCamelCase ):
return (data["data"], data["target"])
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = XGBClassifier()
classifier.fit(_lowerCamelCase , _lowerCamelCase )
return classifier
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[int] = load_iris()
lowerCamelCase__ , lowerCamelCase__ : List[str] = data_handling(_lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = train_test_split(
_lowerCamelCase , _lowerCamelCase , test_size=0.25 )
lowerCamelCase__ : Optional[Any] = iris['target_names']
# Create an XGBoost Classifier from the training data
lowerCamelCase__ : List[str] = xgboost(_lowerCamelCase , _lowerCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , display_labels=_lowerCamelCase , cmap='Blues' , normalize='true' , )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 361
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 316
| 0
|
"""simple docstring"""
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
A_ : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
| 362
|
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 316
| 0
|
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = prime_factors(lowercase__ )
if is_square_free(lowercase__ ):
return -1 if len(lowercase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
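# unreachable: each iteration either returns the tree or enqueues both
# children, so the queue never empties and the bare `raise` below never fires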
raise
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
A_ : TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
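# Non-interactive usage sketch for the traversals above (commented out because
# the __main__ demo builds its tree from stdin; TreeNode is the name used in
# the annotations above):
#
#        1
#       / \
#      2   3
#
# root = TreeNode(1)
# root.left, root.right = TreeNode(2), TreeNode(3)
# pre_order(root)    # prints: 1,2,3,
# in_order(root)     # prints: 2,1,3,
# post_order(root)   # prints: 2,3,1,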
| 316
| 0
|
"""simple docstring"""
A_ : Tuple = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
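# Sketch of how a pin table like this is typically consumed (hypothetical
# helper; in transformers a `require_version`-style check reads this table):
def deps_version(deps, pkg):
    if pkg not in deps:
        raise ValueError(f'{pkg} is not tracked in the dependency table')
    return deps[pkg]

assert deps_version(A_, 'torch') == 'torch>=1.9,!=1.12.0'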
| 364
|
"""simple docstring"""
# Note: if you intend to run this script, make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
A_ : Optional[Any] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
A_ : List[Any] = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
A_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
A_ : str = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
A_ : Optional[Any] = "allenai"
def lowerCamelCase_ ( _lowerCamelCase ):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCamelCase__ : List[Any] = dict((re.sub(r'@@$' , '' , _lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _lowerCamelCase ), v) for k, v in d.items() )
lowerCamelCase__ : int = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
lowerCamelCase__ : List[str] = d[k] # restore
return da
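# For example (sketch of the function above, `rewrite_dict_keys` in the
# original file; all four special tokens must be present in the input):
#   rewrite_dict_keys({'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'le@@': 5, 'er': 7})
#   -> {'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}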
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
# note: the model dump is old and fairseq has since upgraded its model format,
# performing a whole lot of rewrites and splits on the saved weights, so we
# can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation: if the source vocab
# contains no uppercase letters at all, assume the model expects lowercased input
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 316
| 0
|
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
A_ : List[str] = ""
A_ : Dict = ""
A_ : List[Any] = ""
A_ : str = ""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = tweepy.OAuthHandler(_a , _a )
auth.set_access_token(_a , _a )
lowerCamelCase__ : List[str] = tweepy.API(_a )
# initialize a list to hold all the tweepy Tweets
lowerCamelCase__ : int = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowerCamelCase__ : Tuple = api.user_timeline(screen_name=_a , count=200 )
# save most recent tweets
alltweets.extend(_a )
# save the id of the oldest tweet less one
lowerCamelCase__ : Tuple = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(_a ) > 0:
print(f'''getting tweets before {oldest}''' )
# all subsequent requests use the max_id param to prevent duplicates
lowerCamelCase__ : List[str] = api.user_timeline(
screen_name=_a , count=200 , max_id=_a )
# save most recent tweets
alltweets.extend(_a )
# update the id of the oldest tweet less one
lowerCamelCase__ : Tuple = alltweets[-1].id - 1
print(f'''...{len(_a )} tweets downloaded so far''' )
# transform the tweepy tweets into a 2D array that will populate the csv
lowerCamelCase__ : Dict = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f'''new_{screen_name}_tweets.csv''' , 'w' , newline='' ) as f:
lowerCamelCase__ : Tuple = csv.writer(_a )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(_a )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 365
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
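# Usage sketch for the abstract base above (in transformers this corresponds
# to BaseTransformersCLICommand): a subclass registers an argparse subcommand
# via the static method and does its work in run(). All names below are
# illustrative, not from the original file.
from abc import ABC, abstractmethod

class BaseCommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class HelloCommand(BaseCommand):
    @staticmethod
    def register_subcommand(subparsers):
        # `subparsers` is the action returned by ArgumentParser.add_subparsers()
        sub = subparsers.add_parser('hello')
        sub.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print('hello')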
| 316
| 0
|
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class a_ :
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : int = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = UNetaDConditionModel(
sample_size=3_2, layers_per_block=1, block_out_channels=[3_2, 6_4], down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=3, out_channels=6, cross_attention_dim=3_2, encoder_hid_dim=3_2, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCamelCase__ : int = DDPMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='squaredcos_cap_v2', beta_start=0.0_001, beta_end=0.02, thresholding=_lowercase, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : int = TaEncoderModel.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-t5' )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = UNetaDConditionModel(
sample_size=3_2, layers_per_block=[1, 2], block_out_channels=[3_2, 6_4], down_block_types=[
'ResnetDownsampleBlock2D',
'SimpleCrossAttnDownBlock2D',
], mid_block_type='UNetMidBlock2DSimpleCrossAttn', up_block_types=['SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'], in_channels=6, out_channels=6, cross_attention_dim=3_2, encoder_hid_dim=3_2, attention_head_dim=8, addition_embed_type='text', addition_embed_type_num_heads=2, cross_attention_norm='group_norm', resnet_time_scale_shift='scale_shift', act_fn='gelu', class_embed_type='timestep', mid_block_scale_factor=1.414, time_embedding_act_fn='gelu', time_embedding_dim=3_2, )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCamelCase__ : Dict = DDPMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='squaredcos_cap_v2', beta_start=0.0_001, beta_end=0.02, thresholding=_lowercase, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='epsilon', variance_type='learned_range', )
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = DDPMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='squaredcos_cap_v2', beta_start=0.0_001, beta_end=0.02, )
torch.manual_seed(0 )
lowerCamelCase__ : Union[str, Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.get_dummy_components()
lowerCamelCase__ : int = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowerCamelCase__ : List[str] = self.get_dummy_inputs(_lowercase )
lowerCamelCase__ : Any = inputs['prompt']
lowerCamelCase__ : str = inputs['generator']
lowerCamelCase__ : Optional[Any] = inputs['num_inference_steps']
lowerCamelCase__ : Optional[Any] = inputs['output_type']
if "image" in inputs:
lowerCamelCase__ : Union[str, Any] = inputs['image']
else:
lowerCamelCase__ : List[str] = None
if "mask_image" in inputs:
lowerCamelCase__ : Optional[Any] = inputs['mask_image']
else:
lowerCamelCase__ : List[Any] = None
if "original_image" in inputs:
lowerCamelCase__ : Optional[int] = inputs['original_image']
else:
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ , lowerCamelCase__ : List[Any] = pipe.encode_prompt(_lowercase )
# inputs with prompt converted to embeddings
lowerCamelCase__ : Optional[int] = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowerCamelCase__ : Optional[Any] = image
if mask_image is not None:
lowerCamelCase__ : Any = mask_image
if original_image is not None:
lowerCamelCase__ : Dict = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_lowercase, _lowercase, _lowercase )
lowerCamelCase__ : Union[str, Any] = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowerCamelCase__ : Union[str, Any] = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_lowercase, _lowercase ) is None, f'''`{optional_component}` did not stay set to None after loading.''', )
lowerCamelCase__ : str = self.get_dummy_inputs(_lowercase )
lowerCamelCase__ : Dict = inputs['generator']
lowerCamelCase__ : List[Any] = inputs['num_inference_steps']
lowerCamelCase__ : Dict = inputs['output_type']
# inputs with prompt converted to embeddings
lowerCamelCase__ : Dict = {
'prompt_embeds': prompt_embeds,
'negative_prompt_embeds': negative_prompt_embeds,
'generator': generator,
'num_inference_steps': num_inference_steps,
'output_type': output_type,
}
if image is not None:
lowerCamelCase__ : Dict = image
if mask_image is not None:
lowerCamelCase__ : List[str] = mask_image
if original_image is not None:
lowerCamelCase__ : Tuple = original_image
lowerCamelCase__ : Tuple = pipe_loaded(**_lowercase )[0]
lowerCamelCase__ : str = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase, 1e-4 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
lowerCamelCase__ : Optional[Any] = self.get_dummy_inputs(_lowercase )
lowerCamelCase__ : str = pipe(**_lowercase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_lowercase )
lowerCamelCase__ : Dict = self.pipeline_class.from_pretrained(_lowercase )
pipe_loaded.to(_lowercase )
pipe_loaded.set_progress_bar_config(disable=_lowercase )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowerCamelCase__ : Dict = self.get_dummy_inputs(_lowercase )
lowerCamelCase__ : List[str] = pipe_loaded(**_lowercase )[0]
lowerCamelCase__ : Union[str, Any] = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
self.assertLess(_lowercase, 1e-4 )
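# Both tests above follow the standard diffusers round-trip pattern: run the
# pipeline, `save_pretrained` to a temporary directory, `from_pretrained` it
# back, re-run with identical inputs, and assert the max absolute difference
# between the two outputs stays below 1e-4.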
| 366
|
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
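# A readable, self-contained version of the complement function above (the
# snippet's obfuscated name differs):
import re

def dna_complement(dna):
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG', 'TAGC'))

assert dna_complement('GCAT') == 'CGTA'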
| 316
| 0
|
"""simple docstring"""
import string
from math import logaa
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
lowerCamelCase__ : Tuple = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
lowerCamelCase__ : Tuple = corpus_without_punctuation.split('\n' )
lowerCamelCase__ : Dict = term.lower()
return (len([doc for doc in docs if term in doc] ), len(SCREAMING_SNAKE_CASE__ ))
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
return round(tf * idf , 3 )
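# Worked example for the helpers above (readable names for the obfuscated
# functions would be term_frequency, document_frequency,
# inverse_document_frequency and tf_idf):
from math import log10

tf = 2                           # the term occurs twice in the document
df, n = 1, 3                     # the term appears in 1 of 3 corpus documents
idf = round(log10(n / df), 3)    # log10(3 / 1) ~= 0.477
assert idf == 0.477
assert round(tf * idf, 3) == 0.954   # tf-idf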
| 367
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = s.rsplit(_lowerCamelCase , _lowerCamelCase )
return new.join(_lowerCamelCase )
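# e.g. (sketch): with readable names, rreplace(s, old, new, occurrence)
# replaces the last `occurrence` matches of `old` from the right:
#   rreplace('a.b.w', '.w', '.weight', 1) -> 'a.b.weight'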
def lowerCamelCase_ ( _lowerCamelCase ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Any = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : Union[str, Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ : str = rreplace(_lowerCamelCase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = rreplace(_lowerCamelCase , '.b' , '.bias' , 1 )
lowerCamelCase__ : int = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True ):
from dall_e import Encoder
lowerCamelCase__ : List[str] = Encoder()
if os.path.exists(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase )
else:
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Dict = FlavaImageCodebookConfig()
lowerCamelCase__ : Tuple = FlavaImageCodebook(_lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = encoder.state_dict()
lowerCamelCase__ : Any = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = hf_model.state_dict()
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 0
|
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 368
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDeiTModel(config=lowerCamelCase_ )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
lowerCamelCase__ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Optional[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Any = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFDeiTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 316
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class a_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : int = ['torch', 'scipy']
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['torch', 'scipy'] )
@classmethod
def a__ (cls, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(cls, ['torch', 'scipy'] )
@classmethod
def a__ (cls, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(cls, ['torch', 'scipy'] )
| 369
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
while second != 0:
lowerCamelCase__ : Tuple = first & second
first ^= second
lowerCamelCase__ : int = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input("Enter the first number: ").strip())
A_ : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
| 316
| 0
|
"""simple docstring"""
A_ : Dict = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Dict = [False] * len(_lowerCamelCase )
lowerCamelCase__ : Any = [s]
lowerCamelCase__ : Dict = True
while queue:
lowerCamelCase__ : List[Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : List[str] = u
return visited[t]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[Any] = [-1] * (len(_lowerCamelCase ))
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : List[Any] = [i[:] for i in graph] # Record original cut, copy.
while bfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = float('Inf' )
lowerCamelCase__ : List[str] = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase__ : int = min(_lowerCamelCase , graph[parent[s]][s] )
lowerCamelCase__ : Tuple = parent[s]
max_flow += path_flow
lowerCamelCase__ : Optional[int] = sink
while v != source:
lowerCamelCase__ : str = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase__ : Tuple = parent[v]
for i in range(len(_lowerCamelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
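# Note: this is the classic CLRS flow network (source 0, sink 5, maximum flow
# value 23). The printed list contains the edges the final flow saturates,
# i.e. edges whose residual capacity dropped to zero; by the max-flow min-cut
# theorem, the minimum-cut edges are among them.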
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 0
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
A_ : str = get_logger(__name__)
A_ : Any = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class a_ :
'''simple docstring'''
@add_start_docstrings(_SCREAMING_SNAKE_CASE )
def __call__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class a_ :
'''simple docstring'''
@add_start_docstrings(_SCREAMING_SNAKE_CASE )
def __call__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class a_ ( snake_case_ ):
'''simple docstring'''
@add_start_docstrings(_SCREAMING_SNAKE_CASE )
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
for processor in self:
lowerCamelCase__ : Any = inspect.signature(processor.__call__ ).parameters
if len(_SCREAMING_SNAKE_CASE ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
lowerCamelCase__ : Any = processor(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE )
else:
lowerCamelCase__ : Union[str, Any] = processor(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
return scores
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
lowerCamelCase__ : Optional[int] = temperature
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = scores / self.temperature
return scores
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = -float('Inf' ), lowerCamelCase_ = 1 ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
lowerCamelCase__ : int = top_p
lowerCamelCase__ : Any = filter_value
lowerCamelCase__ : str = min_tokens_to_keep
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = lax.top_k(_SCREAMING_SNAKE_CASE, scores.shape[-1] )
lowerCamelCase__ : int = jnp.full_like(_SCREAMING_SNAKE_CASE, self.filter_value )
lowerCamelCase__ : str = jax.nn.softmax(_SCREAMING_SNAKE_CASE, axis=-1 ).cumsum(axis=-1 )
lowerCamelCase__ : Optional[int] = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
lowerCamelCase__ : Tuple = jnp.roll(_SCREAMING_SNAKE_CASE, 1 )
score_mask |= score_mask.at[:, 0].set(_SCREAMING_SNAKE_CASE )
# min tokens to keep
lowerCamelCase__ : Union[str, Any] = score_mask.at[:, : self.min_tokens_to_keep].set(_SCREAMING_SNAKE_CASE )
lowerCamelCase__ : Tuple = jnp.where(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
lowerCamelCase__ : int = jax.lax.sort_key_val(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )[-1]
return next_scores
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ = -float('Inf' ), lowerCamelCase_ = 1 ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
lowerCamelCase__ : Tuple = max(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
lowerCamelCase__ : Any = filter_value
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = scores.shape
lowerCamelCase__ : int = jnp.full(batch_size * vocab_size, self.filter_value )
lowerCamelCase__ : Optional[int] = min(self.top_k, scores.shape[-1] ) # Safety check
lowerCamelCase__ : Union[str, Any] = lax.top_k(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
lowerCamelCase__ : List[Any] = jnp.broadcast_to((jnp.arange(_SCREAMING_SNAKE_CASE ) * vocab_size)[:, None], (batch_size, topk) ).flatten()
lowerCamelCase__ : Tuple = topk_scores.flatten()
lowerCamelCase__ : List[str] = topk_indices.flatten() + shift
lowerCamelCase__ : Optional[int] = next_scores_flat.at[topk_indices_flat].set(_SCREAMING_SNAKE_CASE )
lowerCamelCase__ : List[str] = next_scores_flat.reshape(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE )
return next_scores
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = bos_token_id
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = jnp.full(scores.shape, -float('inf' ) )
lowerCamelCase__ : Dict = 1 - jnp.bool_(cur_len - 1 )
lowerCamelCase__ : Tuple = jnp.where(_SCREAMING_SNAKE_CASE, new_scores.at[:, self.bos_token_id].set(0 ), _SCREAMING_SNAKE_CASE )
return scores
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = max_length
lowerCamelCase__ : Any = eos_token_id
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = jnp.full(scores.shape, -float('inf' ) )
lowerCamelCase__ : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowerCamelCase__ : Tuple = jnp.where(_SCREAMING_SNAKE_CASE, new_scores.at[:, self.eos_token_id].set(0 ), _SCREAMING_SNAKE_CASE )
return scores
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
lowerCamelCase__ : str = min_length
lowerCamelCase__ : int = eos_token_id
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = 1 - jnp.clip(cur_len - self.min_length, 0, 1 )
lowerCamelCase__ : Dict = jnp.where(_SCREAMING_SNAKE_CASE, scores.at[:, self.eos_token_id].set(-float('inf' ) ), _SCREAMING_SNAKE_CASE )
return scores
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = list(_SCREAMING_SNAKE_CASE )
lowerCamelCase__ : Optional[int] = begin_index
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 1 - jnp.bool_(cur_len - self.begin_index )
lowerCamelCase__ : Any = jnp.where(_SCREAMING_SNAKE_CASE, scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ), _SCREAMING_SNAKE_CASE )
return scores
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = list(_SCREAMING_SNAKE_CASE )
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class a_ ( snake_case_ ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = dict(_SCREAMING_SNAKE_CASE )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
lowerCamelCase__ : List[str] = jnp.ones((max(force_token_map.keys() ) + 1), dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
lowerCamelCase__ : List[Any] = force_token_array.at[index].set(_SCREAMING_SNAKE_CASE )
lowerCamelCase__ : List[Any] = jnp.intaa(_SCREAMING_SNAKE_CASE )
def __call__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
def _force_token(lowerCamelCase_ ):
lowerCamelCase__ : str = scores.shape[0]
lowerCamelCase__ : Tuple = self.force_token_array[generation_idx]
lowerCamelCase__ : Optional[int] = jnp.ones_like(_SCREAMING_SNAKE_CASE, dtype=scores.dtype ) * -float('inf' )
lowerCamelCase__ : Dict = jnp.zeros((batch_size, 1), dtype=scores.dtype )
lowerCamelCase__ : Dict = lax.dynamic_update_slice(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, (0, current_token) )
return new_scores
lowerCamelCase__ : Optional[Any] = lax.cond(
cur_len >= self.force_token_array.shape[0], lambda: scores, lambda: lax.cond(
self.force_token_array[cur_len] >= 0, lambda: _force_token(_SCREAMING_SNAKE_CASE ), lambda: scores, ), )
return scores
class a_ ( snake_case_ ):
'''simple docstring'''
    def __init__(self, generate_config, model_config, decoder_input_length):
        '''simple docstring'''
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, 'max_initial_timestamp_index'):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        '''simple docstring'''
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf'))
        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, False, )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin, True, penultimate_was_timestamp, )
            return jnp.where(
                last_was_timestamp, jnp.where(
                    penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float('inf')), scores_k.at[: self.eos_token_id].set(-float('inf')), ), scores_k, )
        scores = jax.vmap(handle_pairs)(input_ids, scores)
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, False, )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp, scores.at[:, last_allowed + 1 :].set(-float('inf')), scores, )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)
        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float('inf')), scores_k, )
        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
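A brief gloss of the three rules above: timestamp tokens come in pairs, so if the last generated token was a timestamp and the one before it was too, further timestamps are masked, while if only the last one was, plain text tokens are masked; at the first generated position the timestamp can be capped via max_initial_timestamp_index; and whenever the total probability mass on timestamps exceeds that of every individual text token, all text tokens are masked so a timestamp must be sampled.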
| 371
|
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector):
    # tanh(x) computed via the logistic identity: tanh(x) = 2 / (1 + exp(-2x)) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
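The return expression is the logistic-sigmoid form of the hyperbolic tangent, so it should agree with NumPy's built-in; a quick sanity check (illustrative):

import numpy as np
x = np.array([-1.0, 0.0, 1.0])
assert np.allclose((2 / (1 + np.exp(-2 * x))) - 1, np.tanh(x))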
| 316
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
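Each index is either skipped first or appended, so the recursion prints all 2**len(sequence) subsequences. A minimal trace (output order follows the skip-then-include recursion above):

generate_all_subsequences([1, 2])
# prints: []  [2]  [1]  [1, 2]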
| 352
|
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
'''simple docstring'''
lowerCamelCase__ : Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order, ):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id'))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*').where(f'''part_id = {partition_id}''').drop('part_id')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'''{partition_id}_{row_id}''', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
'''simple docstring'''
    def __init__(self, df, partition_order=None, ):
        '''simple docstring'''
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator):
        '''simple docstring'''
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id, num_workers):
        '''simple docstring'''
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
@property
    def n_shards(self):
        '''simple docstring'''
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs, ):
        '''simple docstring'''
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs, )
    def _validate_cache_dir(self):
'''simple docstring'''
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, 'a')
            return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info(self):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        '''simple docstring'''
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(lowerCamelCase_ ):
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
lowerCamelCase__ : List[Any] = self.df.count()
lowerCamelCase__ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase__ : List[Any] = (
self.df.limit(lowerCamelCase_ )
.repartition(1 )
.mapInArrow(lowerCamelCase_, 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase__ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase__ : str = min(lowerCamelCase_, int(approx_total_size / max_shard_size ) )
lowerCamelCase__ : List[Any] = self.df.repartition(lowerCamelCase_ )
    def _prepare_split_single(self, fpath, file_format, max_shard_size, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : List[str] = ParquetWriter if file_format == 'parquet' else ArrowWriter
lowerCamelCase__ : List[str] = os.path.join(self._working_dir, os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath
lowerCamelCase__ : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase__ : int = self.config.features
lowerCamelCase__ : Dict = self._writer_batch_size
lowerCamelCase__ : Optional[Any] = self._fs.storage_options
def write_arrow(lowerCamelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase__ : Any = pyspark.TaskContext().taskAttemptId()
lowerCamelCase__ : str = next(lowerCamelCase_, lowerCamelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Any = writer_class(
features=lowerCamelCase_, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCamelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
shard_id += 1
lowerCamelCase__ : Dict = writer_class(
features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : Tuple = pa.Table.from_batches([batch] )
writer.write_table(lowerCamelCase_ )
if writer._num_bytes > 0:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(lowerCamelCase_ ), os.path.basename(lowerCamelCase_ ) )
shutil.move(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = (
self.df.mapInArrow(lowerCamelCase_, 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
lowerCamelCase__ : str = not is_remote_filesystem(self._fs )
lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join
lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN'
lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[str] = []
for task_id, content in self._prepare_split_single(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : int = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase_ )
lowerCamelCase__ : str = total_num_examples
lowerCamelCase__ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowerCamelCase__ : Union[str, Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase__ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
rename(
lowerCamelCase_, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = 0
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ , lowerCamelCase__ : Any = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase_, len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(lowerCamelCase_, '' ), )
    def _get_examples_iterable_for_split(self, split_generator, ):
        '''simple docstring'''
        return SparkExamplesIterable(self.df)
| 316
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : List[Any] = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function='gelu_pytorch_tanh', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
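A minimal usage sketch (relying on PretrainedConfig's attribute_map, which routes the generic names declared above to the GPT-2-style ones):

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
assert config.hidden_size == 128      # resolved to n_embd via attribute_map
assert config.num_hidden_layers == 2  # resolved to n_layer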
| 353
|
"""simple docstring"""
class PrefixSum:
'''simple docstring'''
    def __init__(self, array):
        '''simple docstring'''
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start, end):
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        '''simple docstring'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
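A usage sketch for the class above:

prefix = PrefixSum([1, 2, 3])       # prefix sums: [1, 3, 6]
assert prefix.get_sum(1, 2) == 5    # 2 + 3
assert prefix.contains_sum(6)       # the whole array sums to 6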
| 316
| 0
|
"""simple docstring"""
from __future__ import annotations
A_ : Optional[Any] = list[tuple[int, int]]
A_ : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A_ : List[Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class Node:
'''simple docstring'''
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent, ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic(self):
        '''simple docstring'''
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy
def __lt__(self, lowerCamelCase_ ):
'''simple docstring'''
return self.f_cost < other.f_cost
class GreedyBestFirst:
'''simple docstring'''
    def __init__(self, start, goal):
        '''simple docstring'''
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search(self):
        '''simple docstring'''
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent):
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors
    def retrace_path(self, node):
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("------")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
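Design note: f_cost above is the heuristic alone, which is what makes this greedy best-first search rather than A*; an A* variant would rank nodes by path cost plus heuristic instead, i.e. a one-line change in Node.__init__ (sketch):

self.f_cost = self.g_cost + self.calculate_heuristic()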
| 354
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    '''simple docstring'''
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 316
| 0
|
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x):
    x = re.sub('<n>', '', x)  # remove pegasus newline char; keep the result (the original discarded it)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 355
|
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
'''simple docstring'''
    def __init__(self, k, window_size):
        '''simple docstring'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')
def __str__(self ):
'''simple docstring'''
return str(self.k )
    def detect(self, img_path):
        '''simple docstring'''
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = self.k  # use the k supplied at construction time rather than a hard-coded 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 316
| 0
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent=None):
    ua = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}'''
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f'''; torch/{_torch_version}'''
    if is_flax_available():
        ua += f'''; jax/{_jax_version}'''
        ua += f'''; flax/{_flax_version}'''
    if is_onnx_available():
        ua += f'''; onnxruntime/{_onnxruntime_version}'''
    # CI will set this value to True
    if os.environ.get('DIFFUSERS_IS_CI', '').upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id, organization=None, token=None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)['name']
        return f'''{username}/{model_id}'''
    else:
        return f'''{organization}/{model_id}'''
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            'Modelcard rendering is based on Jinja templates.'
            ' Please make sure to have `jinja` installed before using `create_model_card`.'
            ' To install it, please run `pip install Jinja2`.')
    if hasattr(args, 'local_rank') and args.local_rank not in [-1, 0]:
        return
    hub_token = args.hub_token if hasattr(args, 'hub_token') else None
    repo_name = get_full_repo_name(model_name, token=hub_token)
    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language='en', license='apache-2.0', library_name='diffusers', tags=[], datasets=args.dataset_name, metrics=[], ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, 'dataset_name') else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, 'gradient_accumulation_steps') else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, 'adam_beta1') else None,
        adam_beta2=args.adam_beta2 if hasattr(args, 'adam_beta2') else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, 'adam_weight_decay') else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, 'adam_epsilon') else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, 'lr_scheduler') else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, 'lr_warmup_steps') else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, 'ema_inv_gamma') else None,
        ema_power=args.ema_power if hasattr(args, 'ema_power') else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, 'ema_max_decay') else None,
        mixed_precision=args.mixed_precision, )
    card_path = os.path.join(args.output_dir, 'README.md')
    model_card.save(card_path)
def extract_commit_hash(resolved_file, commit_hash=None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r'snapshots/([^/]+)/', resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
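For intuition, the regex pulls the snapshot directory name out of a resolved hub cache path (the path below is illustrative, not a real cache entry):

resolved = "models--some-org--some-model/snapshots/aa9ba505e1973ae5cd05f5aedd345178f52f8e6a/unet/config.json"
# re.search(r'snapshots/([^/]+)/', resolved) captures the 40-character commit hash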
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir=None, new_cache_dir=None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*'):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.')
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
"existing cached models. This is a one-time operation, you can interrupt it or run it "
"later by calling `diffusers.utils.hub_utils.move_cache()`."
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
"file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
"message and we will do our best to help."
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, "w") as f:
f.write("1")
except Exception:
logger.warning(
f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
"the directory exists and can be written to."
)
def _add_variant(weights_name, variant=None):
    if variant is not None:
        splits = weights_name.split('.')
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits)
    return weights_name
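For example, the variant is spliced in just before the file extension:

_add_variant('diffusion_pytorch_model.bin', 'fp16')  # -> 'diffusion_pytorch_model.fp16.bin'
_add_variant('diffusion_pytorch_model.bin')          # no variant -> unchanged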
def _get_model_file(pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''')
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse('0.20.0')
):
try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
warnings.warn(
                    f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''', FutureWarning, )
return model_file
except: # noqa: E722
warnings.warn(
                    f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}\' so that the correct variant file can be added.''', FutureWarning, )
try:
# 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'this model name. Check the model page at '
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
| 356
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class _Item(Generic[KEY, VAL]):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class _DeletedItem(_Item):
'''simple docstring'''
    def __init__(self):
        '''simple docstring'''
        super().__init__(None, None)
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
'''simple docstring'''
    def __init__(self, initial_block_size=8, capacity_factor=0.75):
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key):
        '''simple docstring'''
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind):
        '''simple docstring'''
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind, key, val):
        '''simple docstring'''
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self):
        '''simple docstring'''
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self):
        '''simple docstring'''
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size):
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self):
        '''simple docstring'''
        self._resize(len(self._buckets) * 2)

    def _size_down(self):
        '''simple docstring'''
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key):
        '''simple docstring'''
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key, val):
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key, val):
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key):
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key):
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
    def __repr__(self):
        '''simple docstring'''
        val_string = ", ".join(f'''{item.key}: {item.val}''' for item in self._buckets if item)
        return f'''HashMap({val_string})'''
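A usage sketch of the map above (open addressing with linear probing; up- and down-sizing happen automatically):

hm = HashMap(initial_block_size=8)
hm['a'] = 1
hm['b'] = 2
del hm['a']
assert 'a' not in hm and hm['b'] == 2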
| 316
| 0
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Tuple = tmp_path / 'cache'
lowerCamelCase__ : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase__ : List[str] = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
_check_parquet_dataset(lowercase_ , lowercase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if issubclass(lowercase_ , lowercase_ ):
lowerCamelCase__ : List[Any] = parquet_path
elif issubclass(lowercase_ , lowercase_ ):
lowerCamelCase__ : Optional[Any] = [parquet_path]
lowerCamelCase__ : str = tmp_path / 'cache'
lowerCamelCase__ : Union[str, Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase__ : Dict = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_parquet_dataset(lowercase_ , lowercase_ )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=("train",) ):
assert isinstance(lowercase_ , lowercase_ )
for split in splits:
lowerCamelCase__ : List[Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[str] = tmp_path / 'cache'
lowerCamelCase__ : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCamelCase__ : Any = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_parquet_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : str = tmp_path / 'cache'
lowerCamelCase__ : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase__ : List[Any] = features.copy() if features else default_expected_features
lowerCamelCase__ : Optional[int] = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCamelCase__ : Union[str, Any] = ParquetDatasetReader({'train': parquet_path} , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_parquet_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if split:
lowerCamelCase__ : List[Any] = {split: parquet_path}
else:
lowerCamelCase__ : int = 'train'
lowerCamelCase__ : Optional[int] = {'train': parquet_path, 'test': parquet_path}
lowerCamelCase__ : int = tmp_path / 'cache'
lowerCamelCase__ : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
lowerCamelCase__ : Optional[int] = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_parquet_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = ParquetDatasetWriter(lowercase_ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
lowerCamelCase__ : List[str] = pq.ParquetFile(tmp_path / 'foo.parquet' )
lowerCamelCase__ : str = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : int = str(shared_datadir / 'test_image_rgb.jpg' )
lowerCamelCase__ : int = {'image': [image_path]}
lowerCamelCase__ : List[Any] = Features({'image': Image()} )
lowerCamelCase__ : List[Any] = Dataset.from_dict(lowercase_ , features=lowercase_ )
lowerCamelCase__ : Optional[Any] = ParquetDatasetWriter(lowercase_ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
lowerCamelCase__ : List[Any] = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
lowerCamelCase__ : str = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowercase_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
assert get_writer_batch_size(lowercase_ ) == expected
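# Illustrative round-trip outside of pytest (a sketch; the file name is a placeholder):
#
#   ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#   ParquetDatasetWriter(ds, "out.parquet").write()
#   reloaded = ParquetDatasetReader("out.parquet").read()
#   assert reloaded.column_names == ds.column_names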
"""simple docstring"""
def lowerCamelCase_ ( ):
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = 1
while len(_lowerCamelCase ) < 1e6:
constant.append(str(_lowerCamelCase ) )
i += 1
lowerCamelCase__ : str = ''.join(_lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
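# Sanity check: the relevant digits of Champernowne's constant are
# d1=1, d10=1, d100=5, d1000=3, d10000=7, d100000=2, d1000000=1,
# so solution() should return 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.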
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Dict = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class a_ ( a_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'informer'
lowerCamelCase__ : Any = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__(self, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = "student_t", lowerCamelCase_ = "nll", lowerCamelCase_ = 1, lowerCamelCase_ = None, lowerCamelCase_ = "mean", lowerCamelCase_ = 0, lowerCamelCase_ = 0, lowerCamelCase_ = 0, lowerCamelCase_ = 0, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = 6_4, lowerCamelCase_ = 3_2, lowerCamelCase_ = 3_2, lowerCamelCase_ = 2, lowerCamelCase_ = 2, lowerCamelCase_ = 2, lowerCamelCase_ = 2, lowerCamelCase_ = True, lowerCamelCase_ = "gelu", lowerCamelCase_ = 0.05, lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 0.1, lowerCamelCase_ = 1_0_0, lowerCamelCase_ = 0.02, lowerCamelCase_=True, lowerCamelCase_ = "prob", lowerCamelCase_ = 5, lowerCamelCase_ = True, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = prediction_length
lowerCamelCase__ : str = context_length or prediction_length
lowerCamelCase__ : Any = distribution_output
lowerCamelCase__ : Optional[int] = loss
lowerCamelCase__ : Optional[int] = input_size
lowerCamelCase__ : str = num_time_features
lowerCamelCase__ : Dict = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowerCamelCase__ : Dict = scaling
lowerCamelCase__ : str = num_dynamic_real_features
lowerCamelCase__ : List[str] = num_static_real_features
lowerCamelCase__ : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
lowerCamelCase__ : List[Any] = cardinality
else:
lowerCamelCase__ : str = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(lowercase_ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
lowerCamelCase__ : List[str] = embedding_dimension
else:
lowerCamelCase__ : Any = [min(5_0, (cat + 1) // 2 ) for cat in self.cardinality]
lowerCamelCase__ : List[Any] = num_parallel_samples
# Transformer architecture configuration
lowerCamelCase__ : int = input_size * len(self.lags_sequence ) + self._number_of_features
lowerCamelCase__ : List[Any] = d_model
lowerCamelCase__ : int = encoder_attention_heads
lowerCamelCase__ : Union[str, Any] = decoder_attention_heads
lowerCamelCase__ : int = encoder_ffn_dim
lowerCamelCase__ : Tuple = decoder_ffn_dim
lowerCamelCase__ : Optional[int] = encoder_layers
lowerCamelCase__ : Optional[int] = decoder_layers
lowerCamelCase__ : Any = dropout
lowerCamelCase__ : List[Any] = attention_dropout
lowerCamelCase__ : List[Any] = activation_dropout
lowerCamelCase__ : List[Any] = encoder_layerdrop
lowerCamelCase__ : List[str] = decoder_layerdrop
lowerCamelCase__ : Union[str, Any] = activation_function
lowerCamelCase__ : int = init_std
lowerCamelCase__ : List[Any] = use_cache
# Informer
lowerCamelCase__ : Any = attention_type
lowerCamelCase__ : int = sampling_factor
lowerCamelCase__ : Dict = distil
super().__init__(is_encoder_decoder=lowercase_, **lowercase_ )
@property
def a__ (self ):
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
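# Example (a sketch): with only `prediction_length` set, `context_length`
# defaults to the same value.
#
#   config = InformerConfig(prediction_length=24)
#   assert config.context_length == 24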
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
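# Typical entry point re-exported by this __init__ (a sketch; the data file is
# a placeholder):
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files="my_data.csv")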
"""simple docstring"""
import math
def lowerCamelCase_ ( _lowerCamelCase ):
return math.sqrt(_lowercase ) * math.sqrt(_lowercase ) == num
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : int = n
while left <= right:
lowerCamelCase__ : List[str] = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
lowerCamelCase__ : Optional[int] = mid - 1
else:
lowerCamelCase__ : List[str] = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
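# Quick sanity check (a sketch):
#   assert perfect_square_binary_search(16)
#   assert not perfect_square_binary_search(17)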
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # GenerationConfig values are not JSON-serializable as-is, so replace them with their dict form.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
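# Example (a sketch; "out" is a placeholder output directory):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out", predict_with_generate=True, generation_num_beams=4
#   )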
"""Project Euler problem 22: total of the name scores in p022_names.txt."""
import os


def solution():
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # 'A' -> 1, 'B' -> 2, ...
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
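# Worked example from the problem statement: "COLIN" scores
# 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list,
# so it contributes 938 * 53 = 49714 to the total.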
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest element of lst (1-indexed) via quickselect.
    Assumes distinct elements.

    >>> kth_number([2, 1, 3, 4, 5], 3)
    3
    """
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
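# Usage sketch (assumes distinct elements): kth_number([2, 1, 3, 4, 5], 1) == 1
# and kth_number([2, 1, 3, 4, 5], 5) == 5.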
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
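# Note: the hub tests above target the staging endpoint and only run when
# staging tests are enabled, e.g. (a sketch, assuming the usual transformers
# test setup):
#   HUGGINGFACE_CO_STAGING=yes pytest -k ConfigPushToHubTester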
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
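# Typical usage (a sketch; any checkpoint with an image processor config works):
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=image, return_tensors="pt")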
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : list[int] = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[Any] = sum(_lowerCamelCase )
create_state_space_tree(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return result
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
if sum(_lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(_lowerCamelCase )) < max_sum:
return
if sum(_lowerCamelCase ) == max_sum:
result.append(_lowerCamelCase )
return
for index in range(_lowerCamelCase , len(_lowerCamelCase ) ):
create_state_space_tree(
_lowerCamelCase , _lowerCamelCase , index + 1 , [*path, nums[index]] , _lowerCamelCase , remaining_nums_sum - nums[index] , )
A_ : Optional[Any] = [3, 34, 4, 12, 5, 2]
A_ : List[str] = 9
A_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
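# For the sample input above this should print the two qualifying subsets:
# [3, 4, 2] [4, 5]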
import math
def main():
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
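# Worked example (a sketch): encrypt_message(2, "HELLO") reads the message in
# steps of the key per column and returns "HLOEL";
# decrypt_message(2, "HLOEL") restores "HELLO".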
"""simple docstring"""
from __future__ import annotations
import queue
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = data
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : List[Any] = None
def lowerCamelCase_ ( ):
print('\n********Press N to stop entering at any point of time********\n' )
lowerCamelCase__ : str = input('Enter the value of the root node: ' ).strip().lower()
lowerCamelCase__ : queue.Queue = queue.Queue()
lowerCamelCase__ : Optional[Any] = TreeNode(int(_lowerCamelCase ) )
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = q.get()
lowerCamelCase__ : str = f'''Enter the left node of {node_found.data}: '''
lowerCamelCase__ : Dict = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : str = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Dict = left_node
q.put(_lowerCamelCase )
lowerCamelCase__ : List[str] = f'''Enter the right node of {node_found.data}: '''
lowerCamelCase__ : List[str] = input(_lowerCamelCase ).strip().lower() or 'n'
if check == "n":
return tree_node
lowerCamelCase__ : Optional[int] = TreeNode(int(_lowerCamelCase ) )
lowerCamelCase__ : Any = right_node
q.put(_lowerCamelCase )
raise
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
print(node.data , end=',' )
pre_order(node.left )
pre_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
in_order(node.left )
print(node.data , end=',' )
in_order(node.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : Any = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : queue.Queue = queue.Queue()
q.put(_lowerCamelCase )
while not q.empty():
lowerCamelCase__ : List[Any] = []
while not q.empty():
lowerCamelCase__ : str = q.get()
print(node_dequeued.data , end=',' )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=',' )
stack.append(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = n.left
# end of while means current node doesn't have left child
lowerCamelCase__ : List[Any] = stack.pop()
# start to traverse its right child
lowerCamelCase__ : Optional[Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ : list[TreeNode] = []
lowerCamelCase__ : int = node
while n or stack:
while n:
stack.append(_lowerCamelCase )
lowerCamelCase__ : List[str] = n.left
lowerCamelCase__ : Tuple = stack.pop()
print(n.data , end=',' )
lowerCamelCase__ : Union[str, Any] = n.right
def lowerCamelCase_ ( _lowerCamelCase ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ) or not node:
return
lowerCamelCase__ , lowerCamelCase__ : Any = [], []
lowerCamelCase__ : int = node
stacka.append(_lowerCamelCase )
while stacka: # to find the reversed order of post order, store it in stack2
lowerCamelCase__ : List[str] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(_lowerCamelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end=',' )
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
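# Example (a sketch): for a root 1 with left child 2 and right child 3,
# pre_order prints "1,2,3,", in_order prints "2,1,3," and level_order "1,2,3,".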
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
A_ : int = logging.get_logger(__name__)
A_ : Tuple = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class a_ ( lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase__ : int = "deta"
lowerCamelCase__ : Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=9_0_0, lowerCamelCase_=2_0_4_8, lowerCamelCase_=6, lowerCamelCase_=2_0_4_8, lowerCamelCase_=8, lowerCamelCase_=6, lowerCamelCase_=1_0_2_4, lowerCamelCase_=8, lowerCamelCase_=0.0, lowerCamelCase_=True, lowerCamelCase_="relu", lowerCamelCase_=2_5_6, lowerCamelCase_=0.1, lowerCamelCase_=0.0, lowerCamelCase_=0.0, lowerCamelCase_=0.02, lowerCamelCase_=1.0, lowerCamelCase_=True, lowerCamelCase_=False, lowerCamelCase_="sine", lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=4, lowerCamelCase_=True, lowerCamelCase_=3_0_0, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=1, lowerCamelCase_=5, lowerCamelCase_=2, lowerCamelCase_=1, lowerCamelCase_=1, lowerCamelCase_=5, lowerCamelCase_=2, lowerCamelCase_=0.1, lowerCamelCase_=0.25, **lowerCamelCase_, ):
'''simple docstring'''
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCamelCase__ : Optional[int] = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(a__, a__ ):
lowerCamelCase__ : int = backbone_config.pop('model_type' )
lowerCamelCase__ : int = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase__ : Optional[Any] = config_class.from_dict(a__ )
lowerCamelCase__ : Any = backbone_config
lowerCamelCase__ : Optional[Any] = num_queries
lowerCamelCase__ : Dict = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Optional[Any] = encoder_ffn_dim
lowerCamelCase__ : int = encoder_layers
lowerCamelCase__ : Dict = encoder_attention_heads
lowerCamelCase__ : List[str] = decoder_ffn_dim
lowerCamelCase__ : Optional[int] = decoder_layers
lowerCamelCase__ : Dict = decoder_attention_heads
lowerCamelCase__ : Dict = dropout
lowerCamelCase__ : Optional[int] = attention_dropout
lowerCamelCase__ : Tuple = activation_dropout
lowerCamelCase__ : Optional[Any] = activation_function
lowerCamelCase__ : Optional[Any] = init_std
lowerCamelCase__ : int = init_xavier_std
lowerCamelCase__ : List[Any] = encoder_layerdrop
lowerCamelCase__ : List[Any] = auxiliary_loss
lowerCamelCase__ : List[Any] = position_embedding_type
# deformable attributes
lowerCamelCase__ : Tuple = num_feature_levels
lowerCamelCase__ : str = encoder_n_points
lowerCamelCase__ : List[str] = decoder_n_points
lowerCamelCase__ : Any = two_stage
lowerCamelCase__ : Any = two_stage_num_proposals
lowerCamelCase__ : Tuple = with_box_refine
lowerCamelCase__ : Tuple = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
lowerCamelCase__ : Optional[Any] = class_cost
lowerCamelCase__ : Optional[Any] = bbox_cost
lowerCamelCase__ : Dict = giou_cost
# Loss coefficients
lowerCamelCase__ : Any = mask_loss_coefficient
lowerCamelCase__ : Optional[Any] = dice_loss_coefficient
lowerCamelCase__ : Optional[Any] = bbox_loss_coefficient
lowerCamelCase__ : Optional[int] = giou_loss_coefficient
lowerCamelCase__ : Optional[int] = eos_coefficient
lowerCamelCase__ : Union[str, Any] = focal_alpha
super().__init__(is_encoder_decoder=a__, **a__ )
@property
def a__ (self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def a__ (self ):
'''simple docstring'''
return self.d_model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = copy.deepcopy(self.__dict__ )
lowerCamelCase__ : Optional[Any] = self.backbone_config.to_dict()
lowerCamelCase__ : List[Any] = self.__class__.model_type
return output
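# Example (a sketch): the default config initializes a ResNet backbone config.
#
#   config = DetaConfig()
#   assert config.backbone_config.model_type == "resnet"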
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
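# Illustration of the rename/prune step above (hypothetical tensor names):
#   'encoder.layers.0.fc1.weight' -> 'model.encoder.layers.0.fc1.weight'
# while bookkeeping entries such as 'model.encoder.version' are dropped so the
# keys match what FSMTForConditionalGeneration expects.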
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 316
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 365
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
| 316
| 0
|
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
class a_ ( _lowerCAmelCase ):
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
super().__init__()
lowerCamelCase__ : List[str] = nn.ModuleList(SCREAMING_SNAKE_CASE_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = True, ):
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, self.nets ) ):
            lowerCamelCase__ , lowerCamelCase__ : Any = controlnet(
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, )
# merge samples
if i == 0:
                lowerCamelCase__ , lowerCamelCase__ : Optional[int] = down_samples, mid_sample
else:
lowerCamelCase__ : Union[str, Any] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
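    # Usage sketch for the residual merging above (hypothetical checkpoint names;
    # the wrapper class is named `a_` in this dump, MultiControlNetModel upstream):
    #   nets = a_([ControlNetModel.from_pretrained('cn-canny'), ControlNetModel.from_pretrained('cn-depth')])
    # Each net contributes its own down/mid residuals, which are summed pairwise per step.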
def a__ (self, lowerCamelCase_, lowerCamelCase_ = True, lowerCamelCase_ = None, lowerCamelCase_ = False, lowerCamelCase_ = None, ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Union[str, Any] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
SCREAMING_SNAKE_CASE_, is_main_process=SCREAMING_SNAKE_CASE_, save_function=SCREAMING_SNAKE_CASE_, safe_serialization=SCREAMING_SNAKE_CASE_, variant=SCREAMING_SNAKE_CASE_, )
idx += 1
lowerCamelCase__ : Dict = model_path_to_save + f'''_{idx}'''
@classmethod
def a__ (cls, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : str = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
lowerCamelCase__ : Optional[int] = pretrained_model_path
while os.path.isdir(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ : str = ControlNetModel.from_pretrained(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ )
controlnets.append(SCREAMING_SNAKE_CASE_ )
idx += 1
lowerCamelCase__ : Optional[int] = pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(SCREAMING_SNAKE_CASE_ )} controlnets loaded from {pretrained_model_path}.''' )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(SCREAMING_SNAKE_CASE_ )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(SCREAMING_SNAKE_CASE_ )
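# On-disk layout the loader above walks (illustrative, per the comments in it):
#   mydirectory/controlnet      <- pretrained_model_path, first net
#   mydirectory/controlnet_1    <- second net
#   mydirectory/controlnet_2    <- third net, and so on until no directory exists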
| 366
|
"""simple docstring"""
import re
def lowerCamelCase_ ( dna ):
    if len(re.findall('[ATCG]' , dna ) ) != len(dna ):
        raise ValueError('Invalid Strand' )
    return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
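# Quick check of the mapping above: each base is swapped with its complement
# (A<->T, C<->G), so lowerCamelCase_('ATCG') returns 'TAGC'.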
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[str] = logging.get_logger(__name__)
A_ : Any = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class a_ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ : str = 'visual_bert'
def __init__(self, lowerCamelCase_=3_0_5_2_2, lowerCamelCase_=7_6_8, lowerCamelCase_=5_1_2, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=False, lowerCamelCase_=True, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__, bos_token_id=UpperCamelCase__, eos_token_id=UpperCamelCase__, **UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = vocab_size
lowerCamelCase__ : Optional[int] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : int = visual_embedding_dim
lowerCamelCase__ : Dict = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : List[str] = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : int = initializer_range
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : List[str] = layer_norm_eps
lowerCamelCase__ : int = bypass_transformer
lowerCamelCase__ : List[str] = special_visual_initialize
| 367
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace ( s , old , new , occurrence ):
    lowerCamelCase__ : Any = s.rsplit(old , occurrence )
    return new.join(lowerCamelCase__ )
def count_parameters ( state_dict ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict ( state_dict ):
    upgrade = {}
    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
        if "res_path" in key:
            key = key.replace('res_path.' , 'res_path.path.' )
        if key.endswith('.w' ):
            key = rreplace(key , '.w' , '.weight' , 1 )
        if key.endswith('.b' ):
            key = rreplace(key , '.b' , '.bias' , 1 )
        upgrade[key] = value.float()
    return upgrade
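# Example key mapping produced by upgrade_state_dict (hypothetical DALL-E key):
#   'blocks.group_1.res_path.0.w' -> 'blocks.group_1.group.res_path.path.0.weight'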
@torch.no_grad()
def convert_dalle_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None , save_checkpoint=True ):
    from dall_e import Encoder
    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 316
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( _a ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = """Salesforce/blip-image-captioning-base"""
lowerCamelCase__ : Any = (
"""This is a tool that generates a description of an image. It takes an input named `image` which should be the """
"""image to caption, and returns a text that contains the description in English."""
)
lowerCamelCase__ : str = """image_captioner"""
lowerCamelCase__ : str = AutoModelForVisionaSeq
lowerCamelCase__ : Tuple = ["""image"""]
lowerCamelCase__ : Optional[Any] = ["""text"""]
def __init__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
requires_backends(self, ['vision'] )
super().__init__(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.pre_processor(images=lowerCamelCase_, return_tensors='pt' )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.model.generate(**lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase_, skip_special_tokens=lowerCamelCase_ )[0].strip()
| 368
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDeiTModel(config=lowerCamelCase_ )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
lowerCamelCase__ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Optional[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Any = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFDeiTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
| 316
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ : int = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369
|
"""simple docstring"""
def lowerCamelCase_ ( first , second ):
    while second != 0:
        lowerCamelCase__ : Tuple = first & second
        first ^= second
        second = lowerCamelCase__ << 1
    return first
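# Worked trace of the carry loop above for 5 + 3:
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   -> carry=0b010, first=0b100, second=0b100
#   -> carry=0b100, first=0b000, second=0b1000
#   -> carry=0b000, first=0b1000, second=0, so the function returns 8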
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{lowerCamelCase_(first, second) = }")
| 316
| 0
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_="resnet50", lowerCamelCase_=3, lowerCamelCase_=3_2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, ):
'''simple docstring'''
lowerCamelCase__ : str = parent
lowerCamelCase__ : Tuple = out_indices if out_indices is not None else [4]
lowerCamelCase__ : int = stage_names
lowerCamelCase__ : Optional[Any] = out_features
lowerCamelCase__ : str = backbone
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Optional[Any] = image_size
lowerCamelCase__ : Any = num_channels
lowerCamelCase__ : List[Any] = use_pretrained_backbone
lowerCamelCase__ : Optional[Any] = is_training
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : int = self.get_config()
return config, pixel_values
def a__ (self ):
'''simple docstring'''
return TimmBackboneConfig(
image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = TimmBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Any = model(_lowercase )
self.parent.assertEqual(
result.feature_map[-1].shape, (self.batch_size, model.channels[-1], 1_4, 1_4), )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ : Dict = config_and_inputs
lowerCamelCase__ : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class a_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = (TimmBackbone,) if is_torch_available() else ()
lowerCamelCase__ : List[Any] = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Tuple = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TimmBackboneModelTester(self )
lowerCamelCase__ : Dict = ConfigTester(self, config_class=_lowercase, has_text_modality=_lowercase )
def a__ (self ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = 'resnet18'
lowerCamelCase__ : int = 'microsoft/resnet-18'
lowerCamelCase__ : List[Any] = AutoBackbone.from_pretrained(_lowercase, use_timm_backbone=_lowercase )
lowerCamelCase__ : List[str] = AutoBackbone.from_pretrained(_lowercase )
self.assertEqual(len(timm_model.out_features ), len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ), len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels, transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices, (-1,) )
self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names ) - 1] )
lowerCamelCase__ : List[str] = AutoBackbone.from_pretrained(_lowercase, use_timm_backbone=_lowercase, out_indices=[1, 2, 3] )
lowerCamelCase__ : Optional[Any] = AutoBackbone.from_pretrained(_lowercase, out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices, transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ), len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels, transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('Safetensors is not supported by timm.' )
def a__ (self ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] = model_class(_lowercase )
lowerCamelCase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : int = [*signature.parameters.keys()]
lowerCamelCase__ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1], _lowercase )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Optional[Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCamelCase__ : Optional[Any] = self.all_model_classes[0]
lowerCamelCase__ : Tuple = model_class(_lowercase )
model.to(_lowercase )
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(_lowercase, _lowercase )
lowerCamelCase__ : List[Any] = model(**_lowercase )
lowerCamelCase__ : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCamelCase__ : Dict = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCamelCase__ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_lowercase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : int = model_class(_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase__ : int = model(**_lowercase )
self.assertEqual(len(result.feature_maps ), len(config.out_indices ) )
self.assertEqual(len(model.channels ), len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCamelCase__ : Tuple = copy.deepcopy(_lowercase )
lowerCamelCase__ : str = None
lowerCamelCase__ : Optional[Any] = model_class(_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase__ : str = model(**_lowercase )
self.assertEqual(len(result.feature_maps ), 1 )
self.assertEqual(len(model.channels ), 1 )
# Check backbone can be initialized with fresh weights
lowerCamelCase__ : Tuple = copy.deepcopy(_lowercase )
lowerCamelCase__ : Dict = False
lowerCamelCase__ : List[Any] = model_class(_lowercase )
model.to(_lowercase )
model.eval()
lowerCamelCase__ : List[Any] = model(**_lowercase )
| 370
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 316
| 0
|
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
def get_imdb_top_aaa_movies ( url = "" ):
    url = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    soup = BeautifulSoup(requests.get(url ).text , 'html.parser' )
    titles = soup.find_all('td' , attrs='titleColumn' )
    ratings = soup.find_all('td' , class_='ratingColumn imdbRating' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def write_movies ( filename = "IMDb_Top_250_Movies.csv" ):
    movies = get_imdb_top_aaa_movies()
    with open(filename , 'w' , newline='' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['Movie title', 'IMDb rating'] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
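# Illustrative shape of the scraped result (example values, not fetched data):
#   get_imdb_top_aaa_movies() -> {'The Shawshank Redemption': 9.2, 'The Godfather': 9.2, ...}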
| 371
|
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( vector ):
    return (2 / (1 + np.exp(-2 * vector ))) - 1
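# The closed form above is tanh: tanh(x) = 2 / (1 + e^(-2x)) - 1, e.g.
# np.allclose(lowerCamelCase_(np.array([-1.0, 0.0, 1.0])), np.tanh([-1.0, 0.0, 1.0])) is True.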
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 0
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class a_ ( snake_case_ ):
'''simple docstring'''
def __get__(self, lowerCamelCase_, lowerCamelCase_=None ):
'''simple docstring'''
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
lowerCamelCase__ : Optional[Any] = '__cached_' + self.fget.__name__
lowerCamelCase__ : List[Any] = getattr(_A, _A, _A )
if cached is None:
lowerCamelCase__ : int = self.fget(_A )
setattr(_A, _A, _A )
return cached
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
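# Examples for the truth-value parser above:
#   lowerCamelCase_('YES') -> 1, lowerCamelCase_('off') -> 0,
#   lowerCamelCase_('maybe') raises ValueError.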
def lowerCamelCase_ ( _lowerCamelCase ):
if is_torch_fx_proxy(__a ):
return True
if is_torch_available():
import torch
if isinstance(__a , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(__a , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(__a , (jnp.ndarray, Tracer) ):
return True
return isinstance(__a , np.ndarray )
def lowerCamelCase_ ( _lowerCamelCase ):
return isinstance(__a , np.ndarray )
def lowerCamelCase_ ( _lowerCamelCase ):
return _is_numpy(__a )
def lowerCamelCase_ ( _lowerCamelCase ):
import torch
return isinstance(__a , torch.Tensor )
def lowerCamelCase_ ( _lowerCamelCase ):
return False if not is_torch_available() else _is_torch(__a )
def lowerCamelCase_ ( _lowerCamelCase ):
import torch
return isinstance(__a , torch.device )
def lowerCamelCase_ ( _lowerCamelCase ):
return False if not is_torch_available() else _is_torch_device(__a )
def lowerCamelCase_ ( _lowerCamelCase ):
import torch
if isinstance(__a , __a ):
if hasattr(__a , __a ):
lowerCamelCase__ : str = getattr(__a , __a )
else:
return False
return isinstance(__a , torch.dtype )
def lowerCamelCase_ ( _lowerCamelCase ):
return False if not is_torch_available() else _is_torch_dtype(__a )
def lowerCamelCase_ ( _lowerCamelCase ):
import tensorflow as tf
return isinstance(__a , tf.Tensor )
def lowerCamelCase_ ( _lowerCamelCase ):
return False if not is_tf_available() else _is_tensorflow(__a )
def lowerCamelCase_ ( _lowerCamelCase ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(__a , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(__a )
return type(__a ) == tf.Tensor
def lowerCamelCase_ ( _lowerCamelCase ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(__a )
def lowerCamelCase_ ( _lowerCamelCase ):
import jax.numpy as jnp # noqa: F811
return isinstance(__a , jnp.ndarray )
def lowerCamelCase_ ( _lowerCamelCase ):
return False if not is_flax_available() else _is_jax(__a )
def lowerCamelCase_ ( _lowerCamelCase ):
if isinstance(__a , (dict, UserDict) ):
return {k: to_py_obj(__a ) for k, v in obj.items()}
elif isinstance(__a , (list, tuple) ):
return [to_py_obj(__a ) for o in obj]
elif is_tf_tensor(__a ):
return obj.numpy().tolist()
elif is_torch_tensor(__a ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(__a ):
return np.asarray(__a ).tolist()
elif isinstance(__a , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowerCamelCase_ ( _lowerCamelCase ):
if isinstance(__a , (dict, UserDict) ):
return {k: to_numpy(__a ) for k, v in obj.items()}
elif isinstance(__a , (list, tuple) ):
return np.array(__a )
elif is_tf_tensor(__a ):
return obj.numpy()
elif is_torch_tensor(__a ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(__a ):
return np.asarray(__a )
else:
return obj
class a_ ( snake_case_ ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = fields(self )
# Safety and consistency checks
if not len(_A ):
raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
lowerCamelCase__ : Any = getattr(self, class_fields[0].name )
lowerCamelCase__ : Optional[int] = all(getattr(self, field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(_A ):
if isinstance(_A, _A ):
lowerCamelCase__ : Optional[int] = first_field.items()
lowerCamelCase__ : Dict = True
else:
try:
lowerCamelCase__ : Optional[Any] = iter(_A )
lowerCamelCase__ : Tuple = True
except TypeError:
lowerCamelCase__ : Union[str, Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_A ):
if (
not isinstance(_A, (list, tuple) )
or not len(_A ) == 2
or not isinstance(element[0], _A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCamelCase__ : Optional[Any] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self, element[0], element[1] )
if element[1] is not None:
lowerCamelCase__ : Tuple = element[1]
elif first_field is not None:
lowerCamelCase__ : List[Any] = first_field
else:
for field in class_fields:
lowerCamelCase__ : Union[str, Any] = getattr(self, field.name )
if v is not None:
lowerCamelCase__ : Dict = v
def __delitem__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
if isinstance(_A, _A ):
lowerCamelCase__ : Dict = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_A, _A )
super().__setattr__(_A, _A )
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
super().__setitem__(_A, _A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_A, _A )
def a__ (self ):
'''simple docstring'''
return tuple(self[k] for k in self.keys() )
class a_ ( snake_case_ , snake_case_ ):
'''simple docstring'''
@classmethod
def a__ (cls, lowerCamelCase_ ):
'''simple docstring'''
raise ValueError(
f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = "longest"
lowerCamelCase__ : Union[str, Any] = "max_length"
lowerCamelCase__ : Tuple = "do_not_pad"
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = "pt"
lowerCamelCase__ : int = "tf"
lowerCamelCase__ : Any = "np"
lowerCamelCase__ : int = "jax"
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = context_managers
lowerCamelCase__ : Optional[Any] = ExitStack()
def __enter__(self ):
'''simple docstring'''
for context_manager in self.context_managers:
self.stack.enter_context(_A )
def __exit__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
self.stack.__exit__(*_A, **_A )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = infer_framework(__a )
if framework == "tf":
lowerCamelCase__ : List[str] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase__ : str = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase__ : Dict = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : str = model_class.__name__
lowerCamelCase__ : Optional[int] = infer_framework(__a )
if framework == "tf":
lowerCamelCase__ : Optional[Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
lowerCamelCase__ : Union[str, Any] = inspect.signature(model_class.forward ) # PyTorch models
else:
lowerCamelCase__ : Dict = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = "" , _lowerCamelCase = "." ):
def _flatten_dict(_lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase="." ):
for k, v in d.items():
lowerCamelCase__ : Any = str(__a ) + delimiter + str(__a ) if parent_key else k
if v and isinstance(__a , __a ):
yield from flatten_dict(__a , __a , delimiter=__a ).items()
else:
yield key, v
return dict(_flatten_dict(__a , __a , __a ) )
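# Example of the flattening above:
#   lowerCamelCase_({'a': {'b': 1}, 'c': 2}) -> {'a.b': 1, 'c': 2}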
@contextmanager
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=None ):
if is_numpy_array(__a ):
return np.transpose(__a , axes=__a )
elif is_torch_tensor(__a ):
return array.T if axes is None else array.permute(*__a )
elif is_tf_tensor(__a ):
import tensorflow as tf
return tf.transpose(__a , perm=__a )
elif is_jax_tensor(__a ):
return jnp.transpose(__a , axes=__a )
else:
raise ValueError(f'''Type not supported for transpose: {type(__a )}.''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if is_numpy_array(__a ):
return np.reshape(__a , __a )
elif is_torch_tensor(__a ):
return array.reshape(*__a )
elif is_tf_tensor(__a ):
import tensorflow as tf
return tf.reshape(__a , __a )
elif is_jax_tensor(__a ):
return jnp.reshape(__a , __a )
else:
raise ValueError(f'''Type not supported for reshape: {type(__a )}.''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase=None ):
if is_numpy_array(__a ):
return np.squeeze(__a , axis=__a )
elif is_torch_tensor(__a ):
return array.squeeze() if axis is None else array.squeeze(dim=__a )
elif is_tf_tensor(__a ):
import tensorflow as tf
return tf.squeeze(__a , axis=__a )
elif is_jax_tensor(__a ):
return jnp.squeeze(__a , axis=__a )
else:
raise ValueError(f'''Type not supported for squeeze: {type(__a )}.''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if is_numpy_array(__a ):
return np.expand_dims(__a , __a )
elif is_torch_tensor(__a ):
return array.unsqueeze(dim=__a )
elif is_tf_tensor(__a ):
import tensorflow as tf
return tf.expand_dims(__a , axis=__a )
elif is_jax_tensor(__a ):
return jnp.expand_dims(__a , axis=__a )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(__a )}.''' )
def lowerCamelCase_ ( _lowerCamelCase ):
if is_numpy_array(__a ):
return np.size(__a )
elif is_torch_tensor(__a ):
return array.numel()
elif is_tf_tensor(__a ):
import tensorflow as tf
return tf.size(__a )
elif is_jax_tensor(__a ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(__a )}.''' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
for key, value in auto_map.items():
if isinstance(__a , (tuple, list) ):
lowerCamelCase__ : Any = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
lowerCamelCase__ : Dict = f'''{repo_id}--{value}'''
return auto_map
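# Example of the repo-id prefixing above (hypothetical auto_map entries):
#   {'AutoModel': 'modeling.MyModel'} with repo_id 'user/repo'
#   -> {'AutoModel': 'user/repo--modeling.MyModel'}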
def lowerCamelCase_ ( _lowerCamelCase ):
for base_class in inspect.getmro(__a ):
lowerCamelCase__ : Tuple = base_class.__module__
lowerCamelCase__ : List[Any] = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 350
|
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 316
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
A_ : Optional[int] = False
@skip_mps
class a_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = StableDiffusionAttendAndExcitePipeline
lowerCamelCase__ : int = False
lowerCamelCase__ : int = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
lowerCamelCase__ : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def a__ (cls ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=1, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=3_2, attention_head_dim=(2, 4), use_linear_projection=lowerCamelCase_, )
lowerCamelCase__ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=lowerCamelCase_, set_alpha_to_one=lowerCamelCase_, )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=1_2_8, )
torch.manual_seed(0 )
lowerCamelCase__ : List[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, hidden_act='gelu', projection_dim=5_1_2, )
lowerCamelCase__ : List[str] = CLIPTextModel(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCamelCase__ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : List[str] = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : List[str] = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
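
# A minimal usage sketch (illustrative, not part of the test suite). It assumes a
# CUDA device and the public "CompVis/stable-diffusion-v1-4" checkpoint; the prompt
# and token indices below are made up for demonstration.
if __name__ == "__main__":
    pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
    ).to("cuda")
    # `token_indices` selects the prompt tokens whose cross-attention maps are
    # iteratively strengthened during denoising.
    image = pipe(prompt="a cat and a frog", token_indices=[2, 5], guidance_scale=7.5).images[0]
    image.save("cat_and_frog.png")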
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal (base-10) integer to its octal (base-8) string form."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically num //= 8, dropping any remainder
    # The int() call removes the trailing '.0' that math.floor/math.pow produce.
    return f"0o{int(octal)}"
def main() -> None:
print('\n2 in octal is:' )
print(decimal_to_octal(2 ) ) # = 2
print('\n8 in octal is:' )
print(decimal_to_octal(8 ) ) # = 10
print('\n65 in octal is:' )
print(decimal_to_octal(65 ) ) # = 101
print('\n216 in octal is:' )
print(decimal_to_octal(216 ) ) # = 330
print('\n512 in octal is:' )
print(decimal_to_octal(512 ) ) # = 1000
print('\n' )
if __name__ == "__main__":
main()
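    # Illustrative sanity check (an editor-added sketch): the result should match
    # Python's built-in oct() for these inputs.
    assert all(decimal_to_octal(n) == oct(n) for n in (2, 8, 65, 216, 512))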
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for Spark."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int]):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(self, df: "pyspark.sql.DataFrame", cache_dir: str = None, working_dir: str = None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs)
    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"), pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"), pyspark.sql.functions.count("num_bytes").alias("num_shards"), pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
lowerCamelCase__ : str = not is_remote_filesystem(self._fs )
lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join
lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN'
lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[str] = []
for task_id, content in self._prepare_split_single(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : int = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase_ )
lowerCamelCase__ : str = total_num_examples
lowerCamelCase__ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowerCamelCase__ : Union[str, Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase__ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
rename(
lowerCamelCase_, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = 0
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ , lowerCamelCase__ : Any = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase_, len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(lowerCamelCase_, '' ), )
    def _get_examples_iterable_for_split(self, split_generator: "datasets.SplitGenerator") -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
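
# A minimal usage sketch (illustrative): in recent `datasets` releases this builder
# backs `Dataset.from_spark`, so the public entry point looks like the following.
# The local SparkSession and toy DataFrame are assumptions for demonstration.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    from datasets import Dataset

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([("hello",), ("world",)], schema="text: string")
    ds = Dataset.from_spark(df)
    print(ds[0])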
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data. Property names match the corresponding model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
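
# A minimal sketch of a concrete task (illustrative; `_ConllLikeTask` is a
# hypothetical name, not part of this module): a CoNLL-style reader where each
# line holds "token label" and sentences are separated by blank lines.
class _ConllLikeTask(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        mode_name = mode.value if isinstance(mode, Split) else mode
        examples, words, labels = [], [], []
        with open(os.path.join(data_dir, f"{mode_name}.txt"), encoding="utf-8") as f:
            for line in f:
                parts = line.split()
                if not parts:  # a blank line ends the current sentence
                    if words:
                        examples.append(InputExample(guid=f"{mode_name}-{len(examples)}", words=words, labels=labels))
                        words, labels = [], []
                else:
                    words.append(parts[0])
                    labels.append(parts[-1])
        if words:
            examples.append(InputExample(guid=f"{mode_name}-{len(examples)}", words=words, labels=labels))
        return examples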
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use the cross entropy ignore_index as padding label id so that only real label ids contribute to the loss.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or create them from the dataset file.
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use the cross entropy ignore_index as padding label id so that only real label ids contribute to the loss.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str, tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen, ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64), (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end], inclusive, in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
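    # Illustrative usage (an editor-added sketch): range-sum queries run in O(1)
    # after the O(n) prefix computation.
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(0, 3) == 10
    assert ps.get_sum(1, 2) == 5
    assert ps.contains_sum(7)  # the contiguous subarray [3, 4]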
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument("--user", type=str, default="ubuntu")
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--key_path", type=str, default=None)
parser.add_argument("--instance", type=str, default="V100:1")
parser.add_argument("--provider", type=str, default="cheapest")
parser.add_argument("--use_spot", type=bool, default=False)
parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
A_ : int = args.example.rsplit("/", 1)[0]
# Set up remote environment
cluster.install_packages(["pip:./"]) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
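
# A minimal usage sketch (illustrative). It assumes the "BAAI/AltCLIP" checkpoint
# hosts this processor; any checkpoint pairing a CLIP image processor with an
# XLM-Roberta tokenizer would work the same way.
if __name__ == "__main__":
    import requests
    from PIL import Image

    processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
    print(list(inputs.keys()))  # input_ids, attention_mask, pixel_values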
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
A_ : Any = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 10_24,
"facebook/esm2_t12_35M_UR50D": 10_24,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
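
# A minimal usage sketch (illustrative), assuming the public
# "facebook/esm2_t6_8M_UR50D" checkpoint: every vocabulary entry is registered as
# a no-split token, so a raw protein string is segmented into single residues.
if __name__ == "__main__":
    tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    enc = tok("MKTAYIAKQR")
    print(enc["input_ids"])  # <cls> + one id per residue + <eos>
    print(tok.convert_ids_to_tokens(enc["input_ids"]))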
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant, valid values are 0.04 and 0.06
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Return the image with detected corners marked and the list of corner positions."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ), codebase_urls=[], reference_urls=[], format='numpy' if self.config_name != 'cvit-mkb-clsr' else None, )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__lowerCamelCase, __lowerCamelCase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing)."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Set the value if the bucket is empty or holds the same key; otherwise report failure."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
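
# A short usage sketch (illustrative): the map behaves like a dict backed by open
# addressing with linear probing, growing and shrinking as the load factor crosses
# `capacity_factor`.
if __name__ == "__main__":
    hm = HashMap()
    for key in range(20):
        hm[key] = key * key
    del hm[3]
    print(len(hm), hm[4], 3 in hm)  # 19 16 False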
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-word strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "en", lowerCamelCase_ = None, lowerCamelCase_ = "ro", **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : int = src_lang
lowerCamelCase__ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(_A, _A, **_A )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCamelCase__ : List[str] = src_lang
lowerCamelCase__ : Union[str, Any] = self(_A, add_special_tokens=_A, **_A )
lowerCamelCase__ : int = self.get_lang_id(_A )
lowerCamelCase__ : Union[str, Any] = tgt_lang_id
return inputs
def a__ (self ):
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def a__ (self ):
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.get_lang_token(_A )
lowerCamelCase__ : Union[str, Any] = self.lang_token_to_id[lang_token]
lowerCamelCase__ : Dict = [self.cur_lang_id]
lowerCamelCase__ : int = [self.eos_token_id]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_lang_token(_A )
lowerCamelCase__ : Optional[Any] = self.lang_token_to_id[lang_token]
lowerCamelCase__ : Optional[int] = [self.cur_lang_id]
lowerCamelCase__ : int = [self.eos_token_id]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.lang_code_to_token[lang]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.get_lang_token(_A )
return self.lang_token_to_id[lang_token]
def load_spm(path, sp_model_kwargs):
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path):
    with open(path, 'r') as f:
        return json.load(f)


def save_json(data, path):
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
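# Usage sketch (added for illustration; not part of the original module). The
# checkpoint name below is an assumption for the example:
# tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
# model_inputs = tokenizer("Hello world", return_tensors="pt")  # ids start with the __en__ language token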
| 357
|
"""simple docstring"""
def solution():
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = ''.join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
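# Worked check (added for illustration): Champernowne's constant starts
# "123456789101112...", so constant[0] == "1" and constant[9] == "1" (the first
# digit of 10); the commonly cited answer for this Project Euler problem is 210.
if __name__ == "__main__":
    assert "".join(str(n) for n in range(1, 16)).startswith("123456789101112")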
| 316
| 0
|
"""simple docstring"""
def is_arithmetic_series(series):
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series):
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
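# Illustrative checks (added; the original file relies on doctest instead):
assert is_arithmetic_series([2, 4, 6]) is True
assert is_arithmetic_series([3, 6, 12, 24]) is False
assert arithmetic_mean([2, 4, 6]) == 4.0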
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
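# Usage sketch of the public API re-exported above (added; the dataset name
# "imdb" is an assumption for the example):
# from datasets import load_dataset
# ds = load_dataset("imdb", split="train")
# print(ds[0])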
| 316
| 0
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        'bos_index',
        'eos_index',
        'pad_index',
        'unk_index',
        'mask_index',
        'image_size',
        'use_cache',
        'out_features',
        'out_indices',
    ]
    attributes_used_in_generation = ['encoder_no_repeat_ngram_size']
    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith('_token_id'):
                case_allowed = True
            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases
    return attribute_used or case_allowed
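# Examples of patterns the checker above accepts as "attribute is used" (added note):
#   config.hidden_size                       -> direct usage
#   getattr(config, "hidden_size", 128)      -> getattr usage
#   getattr(
#       self.config, "hidden_size"           -> multi-line form, caught by the regex branch
#   )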
def check_config_attributes_being_used(config_class):
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ['self', 'kwargs']]
    parameter_defaults = [signature[param].default for param in parameter_names]
    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}
    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith('modeling_')]
    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())
    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])
        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])
    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes
    if len(configs_with_unused_attributes) > 0:
        error = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
        for name, attributes in configs_with_unused_attributes.items():
            error += f'{name}: {attributes}\n'
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
| 359
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to use SortishSampler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
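# Minimal usage sketch (added; the output_dir value is an assumption):
# args = Seq2SeqTrainingArguments(output_dir="out", predict_with_generate=True, generation_num_beams=4)
# args.to_dict()  # any GenerationConfig value is serialized via its own to_dict()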
| 316
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
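# Usage sketch (added; checkpoint name taken from the map above):
# tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# input_ids = tokenizer("Hello world")["input_ids"]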
| 360
|
"""simple docstring"""
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        'Symbol'.center(8),
        'Stack'.center(print_width),
        'Postfix'.center(print_width),
        sep=' | ',
    )
    print('-' * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            (''.join(stack)).ljust(print_width),
            (''.join(post_fix)).ljust(print_width),
            sep=' | ',
        )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8),
            (''.join(stack)).ljust(print_width),
            (''.join(post_fix)).ljust(print_width),
            sep=' | ',
        )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("
    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
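# Worked example (added note): reversing "a+b*c" gives "c*b+a", whose postfix
# form is "cb*a+"; reversing that again yields the prefix form "+a*bc", i.e.
# infix_2_prefix("a+b*c") == "+a*bc".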
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 316
| 0
|
import functools
def mincost_tickets(days, costs):
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
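# Worked example (added): a 7-day pass bought on day 1 covers days 1-7, then two
# single-day tickets cover days 8 and 20, for an optimum of 7 + 2 + 2 = 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11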
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-config')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-config-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-config')
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token
            )
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})
        new_config = AutoConfig.from_pretrained(f'{USER}/test-dynamic-config', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            f'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}'
        )
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version']
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                f" {', '.join(keys_with_defaults)}."
            )
    def test_config_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json'
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = 'hf-internal-testing/test-two-configs'
        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 316
| 0
|
"""simple docstring"""
from manim import *
class a_ ( snake_case_ ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = Rectangle(height=0.5, width=0.5 )
lowerCamelCase__ : str = Rectangle(height=0.25, width=0.25 )
lowerCamelCase__ : Dict = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : Tuple = [mem.copy() for i in range(6 )]
lowerCamelCase__ : List[str] = VGroup(*__a ).arrange(__a, buff=0 )
lowerCamelCase__ : str = VGroup(*__a ).arrange(__a, buff=0 )
lowerCamelCase__ : str = VGroup(__a, __a ).arrange(__a, buff=0 )
lowerCamelCase__ : str = Text('CPU', font_size=2_4 )
lowerCamelCase__ : Any = Group(__a, __a ).arrange(__a, buff=0.5, aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
lowerCamelCase__ : List[Any] = [mem.copy() for i in range(4 )]
lowerCamelCase__ : Dict = VGroup(*__a ).arrange(__a, buff=0 )
lowerCamelCase__ : Optional[Any] = Text('GPU', font_size=2_4 )
lowerCamelCase__ : Optional[int] = Group(__a, __a ).arrange(__a, buff=0.5, aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
lowerCamelCase__ : Tuple = [mem.copy() for i in range(6 )]
lowerCamelCase__ : int = VGroup(*__a ).arrange(__a, buff=0 )
lowerCamelCase__ : Dict = Text('Model', font_size=2_4 )
lowerCamelCase__ : str = Group(__a, __a ).arrange(__a, buff=0.5, aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
lowerCamelCase__ : int = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Union[str, Any] = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
lowerCamelCase__ : Dict = Rectangle(height=0.46 / 4, width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a, opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0], direction=__a, buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1], direction=__a, buff=0.0 )
self.add(__a )
model_cpu_arr.append(__a )
self.add(*__a, *__a, *__a )
lowerCamelCase__ : Optional[int] = [mem.copy() for i in range(6 )]
lowerCamelCase__ : List[Any] = VGroup(*__a ).arrange(__a, buff=0 )
lowerCamelCase__ : Any = Text('Loaded Checkpoint', font_size=2_4 )
lowerCamelCase__ : Tuple = Group(__a, __a ).arrange(__a, buff=0.5, aligned_edge=__a )
checkpoint.move_to([3, 0.5, 0] )
self.add(__a )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = []
for i, rect in enumerate(__a ):
lowerCamelCase__ : Optional[int] = fill.copy().set_fill(__a, opacity=0.7 )
target.move_to(__a )
ckpt_arr.append(__a )
lowerCamelCase__ : List[str] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__a )
self.add(*__a, *__a )
lowerCamelCase__ : Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ : Dict = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''', font_size=1_8, )
key_text.move_to([-5, 2.4, 0] )
self.add(__a, __a )
lowerCamelCase__ : Optional[Any] = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''', font_size=1_8, )
blue_text.next_to(__a, DOWN * 2.4, aligned_edge=key_text.get_left() )
self.add(__a )
lowerCamelCase__ : Tuple = MarkupText(
f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''', font_size=2_4, )
step_a.move_to([2, 2, 0] )
lowerCamelCase__ : int = [meta_mem.copy() for i in range(6 )]
lowerCamelCase__ : int = [meta_mem.copy() for i in range(6 )]
lowerCamelCase__ : Optional[int] = VGroup(*__a ).arrange(__a, buff=0 )
lowerCamelCase__ : Optional[int] = VGroup(*__a ).arrange(__a, buff=0 )
lowerCamelCase__ : List[Any] = VGroup(__a, __a ).arrange(__a, buff=0 )
lowerCamelCase__ : Union[str, Any] = Text('Disk', font_size=2_4 )
lowerCamelCase__ : Tuple = Group(__a, __a ).arrange(__a, buff=0.5, aligned_edge=__a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__a, run_time=3 ), Write(__a, run_time=1 ), Create(__a, run_time=1 ) )
lowerCamelCase__ : Optional[int] = []
for i, rect in enumerate(__a ):
lowerCamelCase__ : Optional[int] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__a, run_time=1.5 ) )
self.play(*__a )
self.play(FadeOut(__a ) )
lowerCamelCase__ : Optional[Any] = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''', font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(__a, run_time=3 ) )
self.play(
FadeOut(__a, __a, *__a, *__a ), )
self.wait()
| 362
|
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums, max_sum):
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum):
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
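# Expected output (added note): the only subsets of [3, 34, 4, 12, 5, 2] that
# sum to 9 are [3, 4, 2] and [4, 5], so this prints: [3, 4, 2] [4, 5]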
| 316
| 0
|
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    # compile regexes and require a complete match on each query piece
    qts = tuple((re.compile(x + '$') for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
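# Minimal usage sketch (added; the parameter tree is a toy stand-in for real
# flax GPT params):
# params = {"transformer": {"wte": {"embedding": jnp.zeros((10, 4))}}}
# spec = set_partitions(params)  # every leaf gets a PartitionSpec or None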
| 363
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'Enter the left node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'Enter the right node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=',')
def lowerCamelCase_ ( _lowerCamelCase = "" , _lowerCamelCase=50 , _lowerCamelCase="*" ):
if not s:
return "\n" + width * char
lowerCamelCase__ , lowerCamelCase__ : Dict = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
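def _demo_tree() -> TreeNode:
    # Small fixed tree for illustration (added; not in the original file): 1 with
    # children 2 and 3, so pre-order prints "1,2,3,", in-order "2,1,3,",
    # post-order "2,3,1,", and level-order "1,2,3,".
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    return root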
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 316
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self):
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self):
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'Process {process_number + 1} is executing.')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break
    def __pretty_data(self):
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f'P{self.__allocated_resources_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n'
            )
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f'P{self.__maximum_claim_table.index(item) + 1}'
                + ' '.join(f'{it:>8}' for it in item)
                + '\n'
            )
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector)
        )
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
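# Illustrative run (added), using the module-level test tables above:
# BankersAlgorithm(test_claim_vector, test_allocated_res_table, test_maximum_claim_table).main(describe=True)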
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'{k}</w>']
        d2[k] = d[k]  # restore
    return d2
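# Worked example (added note):
# rewrite_dict_keys({'le@@': 5, 'tt@@': 6, 'er': 7, '<unk>': 3})
# -> {'le': 5, 'tt': 6, 'er</w>': 7, '<unk>': 3}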
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    # (checkpoint_file, fsmt_folder_path, models and kwargs are set up earlier in this script)
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fsmt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
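# Example invocation (hypothetical paths; the checkpoint file must sit in a
# fairseq dump dir next to its dict.{src,tgt}.txt and bpecodes files):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path data/wmt19-ru-en/model4.pt \
#       --pytorch_dump_folder_path data/fsmt-wmt19-ru-en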
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r"\n    [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n    can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n    Args:\n        title_sep (`str`, *optional*, defaults to  `\" / \"`):\n            Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n        doc_sep (`str`, *optional*, defaults to  `\" // \"`):\n            Separator inserted between the text of the retrieved document and the original input when calling\n            [`RagRetriever`].\n        n_docs (`int`, *optional*, defaults to 5):\n            Number of documents to retrieve.\n        max_combined_length (`int`, *optional*, defaults to 300):\n            Max length of contextualized input returned by [`~RagRetriever.__call__`].\n        retrieval_vector_size (`int`, *optional*, defaults to 768):\n            Dimensionality of the document embeddings indexed by [`RagRetriever`].\n        retrieval_batch_size (`int`, *optional*, defaults to 8):\n            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index encapsulated\n            [`RagRetriever`].\n        dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n            A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n            using `datasets.list_datasets()`).\n        dataset_split (`str`, *optional*, defaults to `\"train\"`)\n            Which split of the `dataset` to load.\n        index_name (`str`, *optional*, defaults to `\"compressed\"`)\n            The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n            `\"compressed\"`.\n        index_path (`str`, *optional*)\n            The path to the serialized faiss index on disk.\n        passages_path (`str`, *optional*):\n            A path to text passages compatible with the faiss index. Required if using\n            [`~models.rag.retrieval_rag.LegacyIndex`]\n        use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n            Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n        label_smoothing (`float`, *optional*, defaults to 0.0):\n            Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n            in the loss calculation. If set to 0, no label smoothing is performed.\n        do_marginalize (`bool`, *optional*, defaults to `False`):\n            If `True`, the logits are marginalized over all documents by making use of\n            `torch.nn.functional.log_softmax`.\n        reduce_loss (`bool`, *optional*, defaults to `False`):\n            Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n        do_deduplication (`bool`, *optional*, defaults to `True`):\n            Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n            set to `False` if used while training with distributed backend.\n        exclude_bos_score (`bool`, *optional*, defaults to `False`):\n            Whether or not to disregard the BOS token when computing the loss.\n        output_retrieved(`bool`, *optional*, defaults to `False`):\n            If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n            `context_attention_mask` are returned. See returned tensors for more detail.\n        use_cache (`bool`, *optional*, defaults to `True`):\n            Whether or not the model should return the last key/values attentions (not used by all models).\n        forced_eos_token_id (`int`, *optional*):\n            The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n            `eos_token_id`.\n"
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"

        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        """Instantiate a RagConfig from a question-encoder config and a generator config."""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a plain dict, expanding the two nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
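# A minimal composition sketch (not from the original file); the two pretrained
# model names are illustrative:
#
#   from transformers import AutoConfig, RagConfig
#   question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   generator = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator, n_docs=5)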
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    """Abstract interface that every transformers-cli subcommand implements."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command's subparser (and its arguments) on the root parser."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
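# A minimal sketch (hypothetical command, not from this file). In practice the
# `parser` handed to register_subcommand is the object returned by
# ArgumentParser.add_subparsers():
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser):
#           hello_parser = parser.add_parser("hello")
#           hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#       def run(self):
#           print("hello")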
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import re
def dna_complement(dna: str) -> str:
    """
    Return the complementary strand of a DNA sequence.

    >>> dna_complement("GCTA")
    'CGAT'
    >>> dna_complement("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute a log-mel spectrogram and rescale it into the [-1, 1] range."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
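# Usage sketch (synthetic audio, not from the original file): one second of
# random mono audio at the default 44.1 kHz sampling rate.
#
#   import numpy as np
#   feature_extractor = TvltFeatureExtractor()
#   audio = [np.random.randn(44100).astype(np.float32)]
#   batch = feature_extractor(audio, sampling_rate=44100, return_tensors="np")
#   print(batch["audio_values"].shape, batch["audio_mask"].shape)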
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
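# Illustration (hypothetical DALL-E key) of the renames performed above:
#   "blocks.group_1.res_path.conv_1.w" -> "blocks.group_1.group.res_path.path.conv_1.weight"
#   "blocks.group_1.res_path.conv_1.b" -> "blocks.group_1.group.res_path.path.conv_1.bias"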
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    upgraded_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(upgraded_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    # sanity check: the converted model must carry numerically identical parameters
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
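# Example invocation (illustrative URL/paths; torch.hub downloads the encoder
# when a URL is given instead of a local path):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook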
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
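# A minimal sketch (not part of the original file): the ONNX config exposes the
# dynamic axes of the two exported inputs.
#
#   config = CamembertConfig()
#   onnx_config = CamembertOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask axes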
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
def add(first: int, second: int) -> int:
    """
    Add two non-negative integers using only bitwise operators.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        # `carry` holds the common set bits, which must be added one position higher
        carry = first & second
        # XOR adds the bits where no carry is produced
        first ^= second
        # shift the carry left so it is applied on the next iteration
        second = carry << 1
    return first
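# Supplementary worked trace (not in the original) of add(3, 5):
#   first=0b011, second=0b101: carry=0b001 -> first=0b110, second=0b010
#   first=0b110, second=0b010: carry=0b010 -> first=0b100, second=0b100
#   first=0b100, second=0b100: carry=0b100 -> first=0b000, second=0b1000
#   first=0b000, second=0b1000: carry=0b000 -> first=0b1000, second=0b000
#   second == 0, so the loop exits and returns 0b1000 == 8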
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random 30x400 RGB PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """
    Convert a Roman numeral to an integer.

    >>> roman_to_int("XC")
    90
    >>> roman_to_int("MCMXCIV")
    1994
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # a smaller value before a larger one encodes subtraction (e.g. IV == 4)
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """
    Convert an integer to a Roman numeral.

    >>> int_to_roman(3456)
    'MMMCDLVI'
    >>> int_to_roman(1994)
    'MCMXCIV'
    """
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
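# Supplementary note: the greedy loop in int_to_roman is correct because every
# value in ROMAN exceeds the largest number its successors can represent, e.g.
# 1994 -> M (994 left) -> CM (94) -> XC (4) -> IV (0) gives "MCMXCIV".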
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Apply the hyperbolic tangent elementwise, using the identity
    tanh(x) = (e^x - e^(-x)) / (e^x + e^(-x)) = 2 / (1 + e^(-2x)) - 1.

    >>> tangent_hyperbolic(np.array([0.0]))
    array([0.])
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Add a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct (Boruvka's algorithm assumes distinct weights)."""
        edges = self.get_edges()
        # drop the reversed duplicate of every undirected edge
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Return all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Return all vertices of the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])  # path compression
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree of `graph` with Boruvka's algorithm."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            # cheapest outgoing edge of each component; -1 while unknown
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]

            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1

        mst = Graph.build(edges=mst_edges)
        return mst
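if __name__ == "__main__":
    # Supplementary usage sketch (not part of the original file): build a small
    # weighted graph and print the edges of its minimum spanning tree.
    g = Graph.build(vertices=[0, 1, 2, 3], edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)])
    print(Graph.boruvka_mst(g))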
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=2, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=3_6, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=6, lowerCamelCase_=6, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, lowerCamelCase_=1_0_0_0, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Optional[int] = batch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : int = image_size
lowerCamelCase__ : Dict = patch_size
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : Any = use_input_mask
lowerCamelCase__ : Dict = use_token_type_ids
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : int = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = type_vocab_size
lowerCamelCase__ : Optional[int] = type_sequence_label_size
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : str = coordinate_size
lowerCamelCase__ : Optional[Any] = shape_size
lowerCamelCase__ : Union[str, Any] = num_labels
lowerCamelCase__ : Optional[Any] = num_choices
lowerCamelCase__ : Dict = scope
lowerCamelCase__ : List[str] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase__ : List[Any] = text_seq_length
lowerCamelCase__ : Optional[Any] = (image_size // patch_size) ** 2 + 1
lowerCamelCase__ : Optional[int] = self.text_seq_length + self.image_seq_length
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
lowerCamelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ : List[str] = bbox[i, j, 3]
lowerCamelCase__ : List[Any] = bbox[i, j, 1]
lowerCamelCase__ : int = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ : Tuple = bbox[i, j, 2]
lowerCamelCase__ : Optional[int] = bbox[i, j, 0]
lowerCamelCase__ : Tuple = tmp_coordinate
lowerCamelCase__ : Tuple = tf.constant(lowerCamelCase_ )
lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : int = None
if self.use_input_mask:
lowerCamelCase__ : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase__ : Dict = None
if self.use_token_type_ids:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
lowerCamelCase__ : int = None
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
lowerCamelCase__ : Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFLayoutLMvaModel(config=lowerCamelCase_ )
# text + image
lowerCamelCase__ : Dict = model(lowerCamelCase_, pixel_values=lowerCamelCase_, training=lowerCamelCase_ )
lowerCamelCase__ : Any = model(
lowerCamelCase_, bbox=lowerCamelCase_, pixel_values=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, training=lowerCamelCase_, )
lowerCamelCase__ : int = model(lowerCamelCase_, bbox=lowerCamelCase_, pixel_values=lowerCamelCase_, training=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase__ : str = model(lowerCamelCase_, training=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase__ : Tuple = model({'pixel_values': pixel_values}, training=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.num_labels
lowerCamelCase__ : Union[str, Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = model(
lowerCamelCase_, bbox=lowerCamelCase_, pixel_values=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, training=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, 'hf_compute_loss', None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('input_ids')
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
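
# --- illustrative sketch (added; not part of the original module) -----------
# The module above wires up the lazy-import pattern: `_import_structure` maps
# submodules to exported names, and `_LazyModule` resolves them on first
# attribute access. A minimal, self-contained re-implementation of the idea;
# `_TinyLazyModule` and its argument names are assumptions for illustration.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol back to the module that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


# Usage sketch: `_TinyLazyModule("demo", {"json": ["dumps"]}).dumps({"a": 1})`
# imports `json` only the first time `dumps` is touched.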
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Fast recursive computation of base**exponent % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation (tetration) of base by height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f"{solution() = }")
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """Builder config for datasets built from a Spark DataFrame."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df, partition_order):
    import pyspark
    def generate_fn():
        df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id'))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select('*').where(f'part_id = {partition_id}').drop('part_id')
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f'{partition_id}_{row_id}', row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(self, df, partition_order=None):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
    def shuffle_data_sources(self, generator):
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    def shard_data_sources(self, worker_id, num_workers):
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(self, df, cache_dir=None, working_dir=None, **config_kwargs):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs,
        )
    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok=True will recursively create the directory. It will not throw an error if
            # directories already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, 'fs_test' + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, 'a')
            return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, 'batch_bytes: long')
            .agg(pyspark.sql.functions.sum('batch_bytes').alias('sample_bytes'))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        import pyspark

        writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == 'parquet'

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'],
                )
            shard_id = 0
            writer = writer_class(
                features=features, path=working_fpath.replace('SSSSS', f'{shard_id:05d}').replace('TTTTT', f'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features, path=working_fpath.replace('SSSSS', f'{shard_id:05d}').replace('TTTTT', f'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)
        stats = (
            self.df.mapInArrow(write_arrow, 'task_id: long, num_examples: long, num_bytes: long')
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
lowerCamelCase__ : str = not is_remote_filesystem(self._fs )
lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join
lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN'
lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[str] = []
        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id: int, shard_id: int, global_shard_id: int):
                rename(
                    fs, fpath.replace('SSSSS', f'{shard_id:05d}').replace('TTTTT', f'{task_id:05d}'), fpath.replace('TTTTT-SSSSS', f'{global_shard_id:05d}').replace('NNNNN', f'{total_shards:05d}'),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace('SSSSS', f'{shard_id:05d}').replace('TTTTT', f'{task_id:05d}'), fpath.replace(SUFFIX, ''),
            )
    def _get_examples_iterable_for_split(self, split_generator):
        return SparkExamplesIterable(self.df)
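
# --- illustrative usage sketch (added; not part of the original module) -----
# This builder is what backs `datasets.Dataset.from_spark`: it splits the
# DataFrame by Spark partition, writes each partition to Arrow/Parquet shards,
# then renames shards into the usual SSSSS-of-NNNNN layout. A hedged sketch
# (requires a running Spark session; `spark` and the column names below are
# assumptions):
#
#   from datasets import Dataset
#
#   df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "text"])
#   ds = Dataset.from_spark(df)   # uses a builder like the one above
#   ds = ds.shuffle(seed=0)       # shuffling permutes the partition order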
"""simple docstring"""
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    if bit_count < 0:
        raise ValueError('The given input must be positive')

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the binary strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []

    # prepend 0 to the first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # prepend 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
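
# --- illustrative usage (added; not part of the original file) --------------
# Consecutive Gray codes differ in exactly one bit, which is easy to verify:
if __name__ == "__main__":
    codes = gray_code(3)
    assert codes == [0, 1, 3, 2, 6, 7, 5, 4]
    assert all(bin(a ^ b).count("1") == 1 for a, b in zip(codes, codes[1:]))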
"""simple docstring"""
class PrefixSum:
    def __init__(self, array: list) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end], inclusive, in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
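
# --- illustrative usage (added; not part of the original file) --------------
if __name__ == "__main__":
    ps = PrefixSum([1, 2, 3, 4, 5])
    assert ps.get_sum(1, 3) == 2 + 3 + 4   # O(1) range sum
    assert ps.contains_sum(9) is True      # 2 + 3 + 4 == 9
    assert ps.contains_sum(100) is False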
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
                'The config can simply be saved and uploaded again to be fixed.'
            )
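
# --- illustrative usage sketch (added; not part of the original file) -------
# A config can be built with defaults and selectively overridden; derived
# attributes such as `hidden_size` resolve through `attribute_map`:
#
#   config = MvpConfig(encoder_layers=6, decoder_layers=6)
#   assert config.hidden_size == config.d_model == 1024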
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
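
# --- illustrative usage sketch (added; not part of the original file) -------
# The processor bundles the image processor and the tokenizer behind one
# __call__; the checkpoint name and inputs below are assumptions:
#
#   from transformers import AltCLIPProcessor
#
#   processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#   batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # batch now holds input_ids / attention_mask from the tokenizer plus
#   # pixel_values from the image processor.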
"""simple docstring"""
import functools
def mincost_tickets(days: list, costs: list) -> int:
    """Minimum cost to travel on every day in `days`, given the prices of
    1-day, 7-day and 30-day passes in `costs`."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')

    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
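
# --- illustrative usage (added; not part of the original file) --------------
# Classic example: travel days [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15]
# -> a 7-day pass covering days 4-8 (cost 7) plus single-day passes for days
# 1 and 20 (cost 2 each) gives a minimum total of 11.
if __name__ == "__main__":
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11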
"""simple docstring"""
import cva
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError('invalid k value')

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        img = cva.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img, cva.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the threshold value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print('Generating primitive root of p')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple:
    print('Generating prime p...')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
print('\nWARNING:' )
print(
f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
'Use a different name or delete these files and re-run this program.' )
sys.exit()
    public_key, private_key = generate_key(key_size)
print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(f'''{name}_pubkey.txt''' , 'w' ) as fo:
fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(f'''Writing private key to file {name}_privkey.txt...''' )
with open(f'''{name}_privkey.txt''' , 'w' ) as fo:
fo.write(f'''{private_key[0]},{private_key[1]}''' )
def main() -> None:
print('Making key files...' )
make_key_files('elgamal' , 2048 )
print('Key files generation successful' )
if __name__ == "__main__":
main()
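
# --- illustrative sketch (added; not part of the original file) -------------
# Textbook ElGamal over a prime p with generator g and secret d (public key
# h = g**d mod p). This stand-alone demo uses tiny, insecure numbers purely to
# show the algebra; it is independent of the key *format* written above.
if __name__ == "__main__":
    import random as _random

    p, g, d = 467, 2, 127                 # toy prime, generator, private exponent
    h = pow(g, d, p)                      # public key component
    m = 123                               # message, must satisfy m < p
    k = _random.randrange(2, p - 1)       # fresh ephemeral key per message
    c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p   # encrypt
    recovered = (c2 * pow(c1, p - 1 - d, p)) % p    # decrypt via Fermat's little theorem
    assert recovered == m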
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ' ,'.join(
            f'{item.key}: {item.val}' for item in self._buckets if item
        )
        return f'HashMap({val_string})'
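
# --- illustrative usage (added; not part of the original file) --------------
# HashMap implements the MutableMapping protocol with open addressing and
# tombstone deletion (_deleted), so it behaves like a dict:
if __name__ == "__main__":
    hash_map = HashMap()
    hash_map["apple"] = 1
    hash_map["banana"] = 2
    assert len(hash_map) == 2 and hash_map["apple"] == 1
    del hash_map["apple"]  # leaves a tombstone; later probing still works
    assert "apple" not in hash_map and hash_map["banana"] == 2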
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet',
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet_class_cond',
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )

        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs['latents'] = latents

        return inputs
def a__ (self, lowerCamelCase_=0, lowerCamelCase_="cpu", lowerCamelCase_=torch.floataa, lowerCamelCase_=(1, 3, 6_4, 6_4) ):
'''simple docstring'''
if type(lowerCamelCase_ ) == str:
lowerCamelCase__ : int = torch.device(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = randn_tensor(lowerCamelCase_, generator=lowerCamelCase_, device=lowerCamelCase_, dtype=lowerCamelCase_ )
return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained('diffusers/consistency_models', subfolder='diffusers_cd_imagenet64_l2')
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
"""simple docstring"""
def solution() -> int:
    """Product of the digits d_1, d_10, ..., d_1000000 of Champernowne's constant."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
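
# --- worked example (added; not part of the original file) ------------------
# The concatenation "123456789101112..." has d_1 = 1, d_10 = 1 (first digit of
# 10), d_100 = 5 (inside 55), and so on; the product
# d_1 * d_10 * d_100 * d_1000 * d_10000 * d_100000 * d_1000000
# works out to 1 * 1 * 5 * 3 * 7 * 2 * 1 = 210.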
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f'Node({self.data})'
class LinkedList:
    """A singly linked list supporting indexing, insertion, deletion and reversal."""
    def __init__(self):
        self.head = None
    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self):
        return sum(1 for _ in self)
    def __repr__(self):
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any):
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
        print(self)
    def delete_head(self):
        return self.delete_nth(0)
    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self):
        return self.head is None
    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Set prev as the new head in order to put the head at the end
        self.head = prev
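# Editor's note: a quick non-interactive smoke test of the class above; it
# assumes only the LinkedList definition in this file.
def example_usage() -> None:
    ll = LinkedList()
    for value in (3, 1, 4):
        ll.insert_tail(value)  # 3->1->4
    ll.insert_head(0)  # 0->3->1->4
    assert str(ll) == "0->3->1->4"
    assert ll.delete_nth(2) == 1  # removes the third node, leaving 0->3->4
    ll.reverse()
    assert str(ll) == "4->3->0"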
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
    main()
| 358
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
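# Editor's note: the import-time version gates above follow a common pattern;
# this standalone sketch shows the same check in isolation (only `packaging`
# is required, and the helper name is made up for illustration).
from packaging import version as _version
def require_min_version(found: str, minimum: str, what: str) -> None:
    if _version.parse(found) < _version.parse(minimum):
        raise ImportWarning(f"{what}>={minimum} is required, found {found}.")
require_min_version("12.0.0", "8.0.0", "pyarrow")  # passes silently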
| 316
| 0
|
"""simple docstring"""
g = 9.80665  # standard gravity, in m/s^2
def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Return the buoyant force on an object of the given volume submerged in a fluid."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
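# Editor's note: a worked example for archimedes_principle above; the water
# density value is the usual ~997 kg/m^3 textbook figure.
def _demo_buoyant_force() -> None:
    force = archimedes_principle(fluid_density=997, volume=0.002)  # 2 litres in fresh water
    print(f"buoyant force: {force:.2f} N")  # ~19.55 N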
| 359
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    """Training arguments for sequence-to-sequence models, extending `TrainingArguments`."""
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )
    def to_dict(self):
        """Serialize this instance while converting nested `GenerationConfig` objects to plain dicts."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
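# Editor's note: a sketch of why the to_dict() override above matters. It
# assumes a working transformers install; the argument values are illustrative.
def _demo_seq2seq_args_to_dict() -> None:
    from transformers import GenerationConfig, Seq2SeqTrainingArguments
    args = Seq2SeqTrainingArguments(
        output_dir="out",
        predict_with_generate=True,
        generation_config=GenerationConfig(max_length=32),
    )
    d = args.to_dict()
    assert isinstance(d["generation_config"], dict)  # now JSON-serializable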
| 316
| 0
|
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
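# Editor's note: hand-verified checks for max_product_subarray above.
def _demo_max_product_subarray() -> None:
    assert max_product_subarray([2, 3, -2, 4]) == 6  # subarray [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0  # subarray [0]
    assert max_product_subarray([-2, -3, -4]) == 12  # subarray [-3, -4]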
| 360
|
"""simple docstring"""
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7
    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format
    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format
    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 316
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height/width the processor is expected to produce for the given inputs."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
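# Editor's note: the expected-size logic in the tester above boils down to
# aspect-ratio-preserving "shortest edge" resizing; this standalone helper
# (name invented for illustration) shows the core computation in isolation.
def shortest_edge_resize(height: int, width: int, shortest_edge: int) -> tuple[int, int]:
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge
assert shortest_edge_resize(400, 200, 18) == (36, 18)  # portrait 2:1 input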
| 361
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )
        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})
        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # Check that the fake head request was indeed called
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        repo = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 316
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"
    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
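# Editor's note: minimal usage sketch for the config class above (assumes a
# transformers install; the small sizes are illustrative only).
def _demo_megatron_bert_config() -> None:
    config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
    assert config.model_type == "megatron-bert"
    print(config.hidden_size, config.num_hidden_layers)  # 64 2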
| 362
|
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
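# Editor's note: the module-level run above is equivalent to this
# hand-verified check of the backtracking search.
def _demo_sum_of_subsets() -> None:
    assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]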
| 316
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 5_12,
"xlm-roberta-large": 5_12,
"xlm-roberta-large-finetuned-conll02-dutch": 5_12,
"xlm-roberta-large-finetuned-conll02-spanish": 5_12,
"xlm-roberta-large-finetuned-conll03-english": 5_12,
"xlm-roberta-large-finetuned-conll03-german": 5_12,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
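# Editor's note: minimal usage sketch for the tokenizer above; it downloads
# the pretrained sentencepiece model, so it needs network access plus the
# `transformers` and `sentencepiece` packages.
def _demo_xlmr_tokenizer() -> None:
    from transformers import XLMRobertaTokenizer
    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    ids = tokenizer("Hello world")["input_ids"]
    print(tokenizer.convert_ids_to_tokens(ids))  # e.g. ['<s>', '▁Hello', '▁world', '</s>']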
| 363
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once input stops
def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")
def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 50 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
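# Editor's note: a deterministic, non-interactive demo of the traversals above
# (build_tree() needs keyboard input, so this builds a small tree by hand).
def _demo_traversals() -> None:
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    pre_order(root)  # 1,2,4,5,3,
    print()
    in_order(root)  # 4,2,5,1,3,
    print()
    post_order(root)  # 4,5,2,3,1,
    print()
    level_order(root)  # 1,2,3,4,5,
    print()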
| 316
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()
    @abstractmethod
    def run(self):
        raise NotImplementedError()
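# Editor's note: a sketch of how a concrete command typically fills in the two
# hooks above; the subcommand name and flag are made up for illustration.
class ExampleCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # in practice `parser` is a subparsers action exposing add_parser()
        example_parser = parser.add_parser("example")
        example_parser.add_argument("--name", type=str, default="world")
        example_parser.set_defaults(func=lambda args: ExampleCommand(args.name))
    def __init__(self, name: str):
        self.name = name
    def run(self):
        print(f"Hello, {self.name}!")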
| 364
|
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
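# Editor's note: the docstring example above, executed; special tokens must be
# present in the input for the restore step to find them.
def _demo_rewrite_dict_keys() -> None:
    d = {"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    out = rewrite_dict_keys(d)
    assert out["le"] == 5 and out["tt"] == 6 and out["er</w>"] == 7
    assert out["<s>"] == 0  # special tokens survive unchanged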
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase__ : Optional[int] = basename(_lowerCamelCase )
lowerCamelCase__ : str = dirname(_lowerCamelCase )
lowerCamelCase__ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase__ : int = cls.hub_models()
lowerCamelCase__ : str = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase__ : Optional[Any] = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
lowerCamelCase__ : Any = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase__ : List[str] = vars(chkpt['args']['model'] )
lowerCamelCase__ : Optional[Any] = args['source_lang']
lowerCamelCase__ : List[str] = args['target_lang']
lowerCamelCase__ : List[str] = dirname(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = basename(_lowerCamelCase )
# dicts
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{src_lang}.txt''' )
lowerCamelCase__ : Optional[Any] = os.path.join(_lowerCamelCase , f'''dict.{tgt_lang}.txt''' )
lowerCamelCase__ : Dict = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : List[Any] = rewrite_dict_keys(src_dict.indices )
lowerCamelCase__ : int = len(_lowerCamelCase )
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase__ : Optional[int] = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase__ : int = False
break
lowerCamelCase__ : str = Dictionary.load(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase__ : Optional[Any] = len(_lowerCamelCase )
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase__ : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase__ : Union[str, Any] = fin.read()
lowerCamelCase__ : Any = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(f'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support tokenizer={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase__ : str = 5
lowerCamelCase__ : Tuple = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase__ : List[str] = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase__ : List[Any] = 1.0
print(f'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase__ : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : int = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(f'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase__ : List[str] = chkpt['models'][0]
lowerCamelCase__ : Optional[Any] = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase__ : str = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase__ : int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Any = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase__ : List[Any] = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
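# The conversion above relies on a `rewrite_dict_keys` helper defined earlier in
# the original file. A minimal, self-contained sketch of what it presumably does
# with fairseq BPE dictionaries, assuming a trailing "@@" marks word
# continuations (special-token restoration is omitted; this is an assumption,
# not the library's exact implementation):
import re

def rewrite_dict_keys_sketch(d):
    # drop the "@@" continuation marker; tag word-final tokens with "</w>"
    return {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in d.items()
    }

assert rewrite_dict_keys_sketch({"le@@": 5, "er": 7}) == {"le": 5, "er</w>": 7}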
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
A_ : List[Any] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
A_ : List[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
A_ : Dict = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n    predictions (list of str): list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references (list of list of str): list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n    'google_bleu': google_bleu score\n\nExamples:\n    Example 1:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.44\n\n    Example 2:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.61\n\n    Example 3:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.53\n\n    Example 4:\n        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n        ...          'heed', 'the', 'cat', 'commands']\n        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n        ...          'of', 'the', 'cat']\n\n        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n        ...         'interested', 'in', 'world', 'history']\n        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n        ...          'because', 'he', 'read', 'the', 'book']\n\n        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n        >>> hypotheses = [hyp1, hyp2]\n        >>> google_bleu = datasets.load_metric(\"google_bleu\")\n        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n        >>> print(round(results[\"google_bleu\"], 2))\n        0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ), id='references' ),
} ), )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ = 1, lowerCamelCase_ = 4, ):
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCamelCase_, hypotheses=lowerCamelCase_, min_len=lowerCamelCase_, max_len=lowerCamelCase_ )
}
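# A quick way to sanity-check the metric above is to call nltk's corpus_gleu
# directly; a minimal sketch with made-up token lists (the printed value is
# whatever nltk computes, not a documented result):
from nltk.translate import gleu_score as _gleu

_hyps = [["the", "cat", "sat", "on", "the", "mat"]]
_refs = [[["the", "cat", "is", "on", "the", "mat"]]]
print(_gleu.corpus_gleu(list_of_references=_refs, hypotheses=_hyps, min_len=1, max_len=4))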
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a_ ( snake_case_ ):
'''simple docstring'''
@staticmethod
@abstractmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
raise NotImplementedError()
@abstractmethod
def a__ (self ):
'''simple docstring'''
raise NotImplementedError()
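# The file above is the usual CLI-subcommand pattern: an ABC exposing a static
# registration hook plus an instance entry point. A self-contained sketch of
# the same pattern with hypothetical names (not the library's own commands):
from abc import ABC, abstractmethod
from argparse import ArgumentParser

class BaseCommandSketch(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(subparsers):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()

class HelloCommand(BaseCommandSketch):
    @staticmethod
    def register_subcommand(subparsers):
        # attach a "hello" subcommand and remember how to build the command
        subparsers.add_parser("hello").set_defaults(factory=lambda a: HelloCommand())

    def run(self):
        print("hello")

_root = ArgumentParser()
HelloCommand.register_subcommand(_root.add_subparsers())
_args = _root.parse_args(["hello"])
_args.factory(_args).run()  # prints "hello"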
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
A_ : Optional[Any] = logging.getLogger(__name__)
def lowerCamelCase_ ( ):
lowerCamelCase__ : Union[str, Any] = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=_lowerCamelCase , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=_lowerCamelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=_lowerCamelCase , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=_lowerCamelCase , default='data/dump' , help='The dump file prefix.' )
lowerCamelCase__ : Dict = parser.parse_args()
logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
if args.tokenizer_type == "bert":
lowerCamelCase__ : Any = BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase__ : Any = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
lowerCamelCase__ : str = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCamelCase__ : List[str] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase__ : Any = tokenizer.special_tokens_map['cls_token'] # `<s>`
lowerCamelCase__ : Union[str, Any] = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCamelCase__ : str = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase__ : Optional[Any] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
lowerCamelCase__ : Tuple = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f'''Loading text from {args.file_path}''' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
lowerCamelCase__ : str = fp.readlines()
logger.info('Start encoding' )
logger.info(f'''{len(_lowerCamelCase )} examples to process.''' )
lowerCamelCase__ : int = []
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Union[str, Any] = 1_0000
lowerCamelCase__ : Tuple = time.time()
for text in data:
lowerCamelCase__ : Optional[Any] = f'''{bos} {text.strip()} {sep}'''
lowerCamelCase__ : Union[str, Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
rslt.append(_lowerCamelCase )
iter += 1
if iter % interval == 0:
lowerCamelCase__ : Union[str, Any] = time.time()
logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
lowerCamelCase__ : List[Any] = time.time()
logger.info('Finished binarization' )
logger.info(f'''{len(_lowerCamelCase )} examples processed.''' )
lowerCamelCase__ : Any = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
lowerCamelCase__ : Dict = tokenizer.vocab_size
if vocab_size < (1 << 16):
lowerCamelCase__ : List[Any] = [np.uintaa(_lowerCamelCase ) for d in rslt]
else:
lowerCamelCase__ : str = [np.intaa(_lowerCamelCase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'''Dump to {dp_file}''' )
with open(_lowerCamelCase , 'wb' ) as handle:
pickle.dump(rslt_ , _lowerCamelCase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
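# The uint16/int32 switch above is purely about pickle size: token ids below
# 2**16 fit in two bytes each. A minimal illustration (ids are made up):
import numpy as np

_ids = [101, 7592, 2088, 102]
_vocab_size = 30522  # e.g. a BERT-base-sized vocabulary
_dtype = np.uint16 if _vocab_size < (1 << 16) else np.int32
_arr = np.array(_ids, dtype=_dtype)
print(_arr.dtype, _arr.nbytes)  # uint16, 8 bytes for four ids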
"""simple docstring"""
import re
def lowerCamelCase_ ( _lowerCamelCase ):
if len(re.findall('[ATCG]' , _lowerCamelCase ) ) != len(_lowerCamelCase ):
raise ValueError('Invalid Strand' )
return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
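# A self-contained sketch of the complement logic above (hypothetical name):
# str.translate with a four-letter table maps each base to its pair.
def dna_complement_sketch(dna):
    return dna.translate(str.maketrans("ATCG", "TAGC"))

assert dna_complement_sketch("ATCG") == "TAGC"
assert dna_complement_sketch("GTAT") == "CATA"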
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = grid.shape  # rows, cols
lowerCamelCase__ : List[str] = [-1, 1, 0, 0]
lowerCamelCase__ : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowerCamelCase__ , lowerCamelCase__ : Any = [(0, source)], set()  # priority queue and visited set
lowerCamelCase__ : int = np.full((rows, cols) , np.inf )
lowerCamelCase__ : List[Any] = 0  # distance of the source cell is zero
lowerCamelCase__ : List[str] = np.empty((rows, cols) , dtype=object )  # predecessor of each cell
lowerCamelCase__ : List[str] = None
while queue:
(lowerCamelCase__) , (lowerCamelCase__ , lowerCamelCase__) : Union[str, Any] = heappop(_lowerCamelCase )  # dist, (x, y)
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowerCamelCase__ : Optional[int] = []
while (x, y) != source:
path.append((x, y) )
lowerCamelCase__ , lowerCamelCase__ : Any = predecessors[x, y]  # walk back along the stored predecessors
path.append(_lowerCamelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_lowerCamelCase ) ):
lowerCamelCase__ , lowerCamelCase__ : Tuple = x + dx[i], y + dy[i]  # candidate neighbour (nx, ny)
if 0 <= nx < rows and 0 <= ny < cols:
lowerCamelCase__ : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_lowerCamelCase , (dist + 1, (nx, ny)) )
lowerCamelCase__ : Optional[Any] = dist + 1
lowerCamelCase__ : List[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
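# Usage sketch for the grid search above, assuming cells equal to 1 are
# passable (as the `next_node == 1` check suggests) and (row, col) indexing;
# shown as comments because the function name is mangled in this dump:
#
# import numpy as np
# grid = np.array([[1, 1, 1],
#                  [0, 1, 0],
#                  [1, 1, 1]])
# distance, path = grid_dijkstra(grid, (0, 0), (2, 2), False)  # hypothetical name
# expected: distance 4.0, with the path running down the middle column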
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = s.rsplit(_lowerCamelCase , _lowerCamelCase )
return new.join(_lowerCamelCase )
def lowerCamelCase_ ( _lowerCamelCase ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = {}
lowerCamelCase__ : Any = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowerCamelCase__ : Union[str, Any] = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
if "res_path" in key:
lowerCamelCase__ : Dict = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
lowerCamelCase__ : str = rreplace(_lowerCamelCase , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
lowerCamelCase__ : Optional[Any] = rreplace(_lowerCamelCase , '.b' , '.bias' , 1 )
lowerCamelCase__ : int = value.float()
return upgrade
@torch.no_grad()
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=True ):
from dall_e import Encoder
lowerCamelCase__ : List[str] = Encoder()
if os.path.exists(_lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase )
else:
lowerCamelCase__ : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = ckpt.state_dict()
encoder.load_state_dict(_lowerCamelCase )
if config_path is not None:
lowerCamelCase__ : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_lowerCamelCase )
else:
lowerCamelCase__ : Dict = FlavaImageCodebookConfig()
lowerCamelCase__ : Tuple = FlavaImageCodebook(_lowerCamelCase ).eval()
lowerCamelCase__ : List[str] = encoder.state_dict()
lowerCamelCase__ : Any = upgrade_state_dict(_lowerCamelCase )
hf_model.load_state_dict(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = hf_model.state_dict()
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = count_parameters(_lowerCamelCase )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(_lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
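# The ".w"/".b" renames above use a right-most replace; a self-contained
# sketch of that helper's behaviour (it splits from the right at most
# `occurrence` times and re-joins with the replacement):
def rreplace_sketch(s, old, new, occurrence):
    return new.join(s.rsplit(old, occurrence))

assert rreplace_sketch("blocks.0.conv.w", ".w", ".weight", 1) == "blocks.0.conv.weight"
assert rreplace_sketch("blocks.0.conv.b", ".b", ".bias", 1) == "blocks.0.conv.bias"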
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
A_ : Optional[int] = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 10_00,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
A_ : str = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 10_00,
"block_out_channels": [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
A_ : Dict = {
"sample_size": 2_56,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
A_ : List[str] = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
A_ : Union[str, Any] = {
"num_train_timesteps": 2_01,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
A_ : Union[str, Any] = {
"num_train_timesteps": 1_51,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def lowerCamelCase_ ( _lowerCamelCase ):
if isinstance(_lowerCamelCase , _lowerCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('boolean value expected' )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
lowerCamelCase__ : int = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
lowerCamelCase__ : List[Any] = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
lowerCamelCase__ : int = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
lowerCamelCase__ : Optional[Any] = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
lowerCamelCase__ : List[str] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
lowerCamelCase__ : Tuple = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
lowerCamelCase__ : List[str] = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
lowerCamelCase__ : List[Any] = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
lowerCamelCase__ : List[Any] = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
lowerCamelCase__ : Optional[int] = checkpoint[f'''{old_prefix}.skip_connection.weight''']
lowerCamelCase__ : List[Any] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
lowerCamelCase__ : List[str] = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
lowerCamelCase__ : Optional[int] = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
lowerCamelCase__ : List[str] = checkpoint[f'''{old_prefix}.norm.weight''']
lowerCamelCase__ : List[str] = checkpoint[f'''{old_prefix}.norm.bias''']
lowerCamelCase__ : List[str] = weight_q.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Optional[Any] = bias_q.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Dict = weight_k.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Optional[int] = bias_k.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Optional[int] = weight_v.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Tuple = (
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
lowerCamelCase__ : List[str] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = torch.load(_lowerCamelCase , map_location='cpu' )
lowerCamelCase__ : str = {}
lowerCamelCase__ : Any = checkpoint['time_embed.0.weight']
lowerCamelCase__ : Any = checkpoint['time_embed.0.bias']
lowerCamelCase__ : int = checkpoint['time_embed.2.weight']
lowerCamelCase__ : Optional[Any] = checkpoint['time_embed.2.bias']
if unet_config["num_class_embeds"] is not None:
lowerCamelCase__ : List[Any] = checkpoint['label_emb.weight']
lowerCamelCase__ : Union[str, Any] = checkpoint['input_blocks.0.0.weight']
lowerCamelCase__ : Tuple = checkpoint['input_blocks.0.0.bias']
lowerCamelCase__ : Tuple = unet_config['down_block_types']
lowerCamelCase__ : Any = unet_config['layers_per_block']
lowerCamelCase__ : int = unet_config['attention_head_dim']
lowerCamelCase__ : str = unet_config['block_out_channels']
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Union[str, Any] = channels_list[0]
for i, layer_type in enumerate(_lowerCamelCase ):
lowerCamelCase__ : Dict = channels_list[i]
lowerCamelCase__ : int = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(_lowerCamelCase ):
lowerCamelCase__ : Dict = f'''down_blocks.{i}.resnets.{j}'''
lowerCamelCase__ : Optional[int] = f'''input_blocks.{current_layer}.0'''
lowerCamelCase__ : int = True if j == 0 and downsample_block_has_skip else False
lowerCamelCase__ : Union[str, Any] = convert_resnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_skip=_lowerCamelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(_lowerCamelCase ):
lowerCamelCase__ : Tuple = f'''down_blocks.{i}.resnets.{j}'''
lowerCamelCase__ : Optional[Any] = f'''input_blocks.{current_layer}.0'''
lowerCamelCase__ : str = True if j == 0 and downsample_block_has_skip else False
lowerCamelCase__ : List[Any] = convert_resnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_skip=_lowerCamelCase )
lowerCamelCase__ : str = f'''down_blocks.{i}.attentions.{j}'''
lowerCamelCase__ : int = f'''input_blocks.{current_layer}.1'''
lowerCamelCase__ : int = convert_attention(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
current_layer += 1
if i != len(_lowerCamelCase ) - 1:
lowerCamelCase__ : int = f'''down_blocks.{i}.downsamplers.0'''
lowerCamelCase__ : Dict = f'''input_blocks.{current_layer}.0'''
lowerCamelCase__ : Optional[Any] = convert_resnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
current_layer += 1
lowerCamelCase__ : Dict = current_channels
# hardcoded the mid-block for now
lowerCamelCase__ : Optional[int] = 'mid_block.resnets.0'
lowerCamelCase__ : List[str] = 'middle_block.0'
lowerCamelCase__ : str = convert_resnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Optional[Any] = 'mid_block.attentions.0'
lowerCamelCase__ : List[Any] = 'middle_block.1'
lowerCamelCase__ : Dict = convert_attention(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Optional[int] = 'mid_block.resnets.1'
lowerCamelCase__ : int = 'middle_block.2'
lowerCamelCase__ : Union[str, Any] = convert_resnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = unet_config['up_block_types']
for i, layer_type in enumerate(_lowerCamelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowerCamelCase__ : List[str] = f'''up_blocks.{i}.resnets.{j}'''
lowerCamelCase__ : List[str] = f'''output_blocks.{current_layer}.0'''
lowerCamelCase__ : Optional[Any] = convert_resnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_skip=_lowerCamelCase )
current_layer += 1
if i != len(_lowerCamelCase ) - 1:
lowerCamelCase__ : Optional[Any] = f'''up_blocks.{i}.upsamplers.0'''
lowerCamelCase__ : Tuple = f'''output_blocks.{current_layer-1}.1'''
lowerCamelCase__ : Any = convert_resnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowerCamelCase__ : Any = f'''up_blocks.{i}.resnets.{j}'''
lowerCamelCase__ : int = f'''output_blocks.{current_layer}.0'''
lowerCamelCase__ : Any = convert_resnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_skip=_lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = f'''up_blocks.{i}.attentions.{j}'''
lowerCamelCase__ : List[Any] = f'''output_blocks.{current_layer}.1'''
lowerCamelCase__ : List[str] = convert_attention(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
current_layer += 1
if i != len(_lowerCamelCase ) - 1:
lowerCamelCase__ : Optional[int] = f'''up_blocks.{i}.upsamplers.0'''
lowerCamelCase__ : int = f'''output_blocks.{current_layer-1}.2'''
lowerCamelCase__ : str = convert_resnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase__ : Optional[Any] = checkpoint['out.0.weight']
lowerCamelCase__ : Dict = checkpoint['out.0.bias']
lowerCamelCase__ : List[Any] = checkpoint['out.2.weight']
lowerCamelCase__ : List[Any] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
A_ : Tuple = parser.parse_args()
A_ : Optional[Any] = strabool(args.class_cond)
A_ : int = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
A_ : Union[str, Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A_ : Optional[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
A_ : Dict = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
A_ : Dict = None
A_ : List[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
A_ : Optional[int] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
A_ : str = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
A_ : Tuple = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
A_ : Dict = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
A_ : Union[str, Any] = CMStochasticIterativeScheduler(**scheduler_config)
A_ : Optional[Any] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
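# A hypothetical invocation of the converter above (script name and paths are
# placeholders; the checkpoint basename must contain "imagenet64", "256" plus
# "bedroom"/"cat", or "test" so that a U-Net config can be selected):
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path ./cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-pipeline \
#       --class_cond True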
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=3_0, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=3_2, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=1_0, lowerCamelCase_=0.02, lowerCamelCase_=3, lowerCamelCase_=None, lowerCamelCase_=2, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : int = batch_size
lowerCamelCase__ : Dict = image_size
lowerCamelCase__ : List[str] = patch_size
lowerCamelCase__ : Union[str, Any] = num_channels
lowerCamelCase__ : str = is_training
lowerCamelCase__ : Any = use_labels
lowerCamelCase__ : Tuple = hidden_size
lowerCamelCase__ : str = num_hidden_layers
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Dict = hidden_dropout_prob
lowerCamelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase__ : List[Any] = type_sequence_label_size
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : Tuple = scope
lowerCamelCase__ : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowerCamelCase__ : str = (image_size // patch_size) ** 2
lowerCamelCase__ : Optional[int] = num_patches + 2
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Tuple = None
if self.use_labels:
lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : List[str] = self.get_config()
return config, pixel_values, labels
def a__ (self ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFDeiTModel(config=lowerCamelCase_ )
lowerCamelCase__ : Dict = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = TFDeiTForMaskedImageModeling(config=lowerCamelCase_ )
lowerCamelCase__ : Any = model(lowerCamelCase_ )
self.parent.assertEqual(
result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ : Optional[Any] = TFDeiTForMaskedImageModeling(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = self.type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase__ : List[str] = 1
lowerCamelCase__ : Any = TFDeiTForImageClassification(lowerCamelCase_ )
lowerCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Any = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Tuple = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase__ : Any = False
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Dict = False
lowerCamelCase__ : int = False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFDeiTModelTester(self )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=lowerCamelCase_, has_text_modality=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds' )
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Optional[int] = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer) )
lowerCamelCase__ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_, tf.keras.layers.Dense ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Dict = model_class(lowerCamelCase_ )
lowerCamelCase__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : str = [*signature.parameters.keys()]
lowerCamelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = super()._prepare_for_class(lowerCamelCase_, lowerCamelCase_, return_labels=lowerCamelCase_ )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : int = TFDeiTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' )
if is_vision_available()
else None
)
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' )
lowerCamelCase__ : List[Any] = self.default_image_processor
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Optional[int] = image_processor(images=lowerCamelCase_, return_tensors='tf' )
# forward pass
lowerCamelCase__ : Tuple = model(**lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape, lowerCamelCase_ )
lowerCamelCase__ : Any = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
self.assertTrue(np.allclose(outputs.logits[0, :3], lowerCamelCase_, atol=1e-4 ) )
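# Note on the tester above: DeiT prepends two special tokens (a [CLS] and a
# distillation token), so with image_size=30 and patch_size=2 the expected
# sequence length is (30 // 2) ** 2 + 2 == 227.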
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Tuple = 2
lowerCamelCase__ : List[str] = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(_lowerCamelCase )
if n > 1:
factors.append(_lowerCamelCase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
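# The loop above is trial division: each factor is divided out while it still
# divides n, stopping once i*i exceeds n; whatever remains above 1 is itself
# prime. A quick self-contained check of the same algorithm (hypothetical name):
def prime_factors_sketch(n):
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

assert prime_factors_sketch(360) == [2, 2, 2, 3, 3, 5]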
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
while second != 0:
lowerCamelCase__ : Tuple = first & second
first ^= second
lowerCamelCase__ : int = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
A_ : Tuple = int(input("Enter the first number: ").strip())
A_ : Union[str, Any] = int(input("Enter the second number: ").strip())
print(f"{add(first, second) = }")
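# The loop above adds without "+": XOR is the bitwise sum, AND shifted left by
# one is the carry, repeated until no carry remains. Worked trace for add(3, 5):
#   first=0b011, second=0b101 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0b000, first=0b1000, second=0b000
# second reaches 0, so the function returns 0b1000 == 8.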
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ''
lowerCamelCase__ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCamelCase__ : str = None # compression type in fsspec. ex: "gzip"
lowerCamelCase__ : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__(self, lowerCamelCase_ = "", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_ ):
'''simple docstring'''
super().__init__(self, **lowerCamelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase__ : Optional[Any] = fsspec.open(
lowerCamelCase_, mode='rb', protocol=lowerCamelCase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
lowerCamelCase__ : int = os.path.basename(self.file.path.split('::' )[0] )
lowerCamelCase__ : str = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
lowerCamelCase__ : List[Any] = None
@classmethod
def a__ (cls, lowerCamelCase_ ):
'''simple docstring'''
return super()._strip_protocol(lowerCamelCase_ ).lstrip('/' )
def a__ (self ):
'''simple docstring'''
if self.dir_cache is None:
lowerCamelCase__ : Any = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
lowerCamelCase__ : int = {f['name']: f}
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.file.open().read()
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "rb", lowerCamelCase_=None, lowerCamelCase_=True, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : Dict = self._strip_protocol(lowerCamelCase_ )
if mode != "rb":
raise ValueError(f'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = 'bz2'
lowerCamelCase__ : List[str] = 'bz2'
lowerCamelCase__ : int = '.bz2'
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = 'gzip'
lowerCamelCase__ : List[str] = 'gzip'
lowerCamelCase__ : Union[str, Any] = '.gz'
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'lz4'
lowerCamelCase__ : int = 'lz4'
lowerCamelCase__ : Dict = '.lz4'
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'xz'
lowerCamelCase__ : Optional[int] = 'xz'
lowerCamelCase__ : Dict = '.xz'
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'zstd'
lowerCamelCase__ : Dict = 'zstd'
lowerCamelCase__ : List[Any] = '.zst'
def __init__(self, lowerCamelCase_, lowerCamelCase_ = "rb", lowerCamelCase_ = None, lowerCamelCase_ = None, lowerCamelCase_ = DEFAULT_BLOCK_SIZE, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
fo=lowerCamelCase_, mode=lowerCamelCase_, target_protocol=lowerCamelCase_, target_options=lowerCamelCase_, block_size=lowerCamelCase_, **lowerCamelCase_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase__ : List[str] = self.file.__enter__
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = file_
def __enter__(self ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__(self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
self._file.__exit__(*lowerCamelCase_, **lowerCamelCase_ )
def __iter__(self ):
'''simple docstring'''
return iter(self._file )
def a__ (self ):
'''simple docstring'''
return next(self._file )
def __getattr__(self, lowerCamelCase_ ):
'''simple docstring'''
return getattr(self._file, lowerCamelCase_ )
def fixed_enter(*lowerCamelCase_, **lowerCamelCase_ ):
return WrappedFile(_enter(*lowerCamelCase_, **lowerCamelCase_ ) )
lowerCamelCase__ : Tuple = fixed_enter
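# Usage sketch for the compressed filesystems above: fsspec can open such a
# file directly once the compression is named (the path is a placeholder):
#
# import fsspec
# with fsspec.open("/tmp/data.txt.gz", mode="rt", compression="gzip") as f:
#     text = f.read()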
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A_ : List[str] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
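# The expression above is the identity tanh(x) = 2 / (1 + e^(-2x)) - 1;
# a quick numerical check against numpy's built-in:
import numpy as np

_v = np.array([-2.0, 0.0, 2.0])
assert np.allclose((2 / (1 + np.exp(-2 * _v))) - 1, np.tanh(_v))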
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : int = len(_lowerCamelCase ) # No of vertices in graph
lowerCamelCase__ : Optional[int] = [0] * n
lowerCamelCase__ : str = [False] * n
def dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : List[str] = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , id_ )
lowerCamelCase__ : Any = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowerCamelCase__ : Optional[Any] = min(low[at] , low[to] )
lowerCamelCase__ : list[tuple[int, int]] = []
for i in range(_lowerCamelCase ):
if not visited[i]:
dfs(_lowerCamelCase , -1 , _lowerCamelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
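# The DFS above is Tarjan's bridge finder: edge (at, to) is a bridge exactly
# when no back edge from to's subtree reaches at or higher, i.e. when
# id_ <= low[to]. For the first bundled graph (index 0), the triangle 0-1-2
# and the cycle 5-6-7-8 contain no bridges, which leaves (2, 3), (3, 4)
# and (2, 5) as the bridges.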
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
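# The one-liner above is a quine: "%%" renders as a literal "%", and "%r"
# splices in the repr of the format string itself, so `quine % quine`
# reconstructs the source line and prints it.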
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
A_ : str = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
A_ : list[int] = [ord(letter) for letter in string.ascii_lowercase]
A_ : set[int] = {ord(char) for char in VALID_CHARS}
A_ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
lowerCamelCase__ : str = ""
lowerCamelCase__ : int
lowerCamelCase__ : int
lowerCamelCase__ : int
for keychar, cipherchar in zip(cycle(_lowerCamelCase ) , _lowerCamelCase ):
lowerCamelCase__ : str = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(_lowerCamelCase )
return decoded
def lowerCamelCase_ ( _lowerCamelCase ):
"""simple docstring"""
lowerCamelCase__ : list[str] = []
for key in product(_lowerCamelCase , repeat=3 ):
lowerCamelCase__ : Union[str, Any] = try_key(_lowerCamelCase , _lowerCamelCase )
if encoded is not None:
possibles.append(_lowerCamelCase )
return possibles
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
return [possible for possible in possibles if common_word in possible.lower()]
def lowerCamelCase_ ( _lowerCamelCase = "p059_cipher.txt" ):
"""simple docstring"""
lowerCamelCase__ : list[int]
lowerCamelCase__ : list[str]
lowerCamelCase__ : str
lowerCamelCase__ : str
lowerCamelCase__ : str = Path(_lowerCamelCase ).parent.joinpath(_lowerCamelCase ).read_text(encoding='utf-8' )
lowerCamelCase__ : Optional[Any] = [int(_lowerCamelCase ) for number in data.strip().split(',' )]
lowerCamelCase__ : Tuple = filter_valid_chars(_lowerCamelCase )
for common_word in COMMON_WORDS:
lowerCamelCase__ : Dict = filter_common_word(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) == 1:
break
lowerCamelCase__ : List[Any] = possibles[0]
return sum(ord(_lowerCamelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
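# The heart of the solution above is repeating-key XOR; a self-contained
# sketch of its encode/decode symmetry with a made-up key and message:
from itertools import cycle

_cipher = [ord(c) ^ k for c, k in zip("hello", cycle(b"key"))]
_plain = "".join(chr(c ^ k) for c, k in zip(_cipher, cycle(b"key")))
assert _plain == "hello"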
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ : int = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import cva
import numpy as np
class a_ :
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
        lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
        lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
lowerCamelCase__ : Tuple = 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
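                # Harris corner response: R = det(M) - k * trace(M)**2 for the 2x2 structure tensor M of this window.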
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
    A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 352
|
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
A_ : List[Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class a_ ( datasets.BuilderConfig ):
'''simple docstring'''
lowerCamelCase__ : Optional[datasets.Features] = None
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase , ):
import pyspark
def generate_fn():
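        # Stream rows partition by partition in the given order; keys take the form "<partition_id>_<row_id>".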
lowerCamelCase__ : Optional[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) )
for partition_id in partition_order:
lowerCamelCase__ : Dict = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' )
lowerCamelCase__ : Dict = partition_df.collect()
lowerCamelCase__ : int = 0
for row in rows:
yield f'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class a_ ( _BaseExamplesIterable ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=None, ):
'''simple docstring'''
lowerCamelCase__ : Tuple = df
lowerCamelCase__ : Any = partition_order or range(self.df.rdd.getNumPartitions() )
lowerCamelCase__ : List[Any] = _generate_iterable_examples(self.df, self.partition_order )
def __iter__(self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self.split_shard_indices_by_worker(lowerCamelCase_, lowerCamelCase_ )
return SparkExamplesIterable(self.df, partition_order=lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
return len(self.partition_order )
class a_ ( datasets.DatasetBuilder ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = SparkConfig
def __init__(self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowerCamelCase__ : Optional[Any] = df
lowerCamelCase__ : Dict = working_dir
super().__init__(
cache_dir=lowerCamelCase_, config_name=str(self.df.semanticHash() ), **lowerCamelCase_, )
def a__ (self ):
'''simple docstring'''
def create_cache_and_write_probe(lowerCamelCase_ ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir, exist_ok=lowerCamelCase_ )
lowerCamelCase__ : str = os.path.join(self._cache_dir, 'fs_test' + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowerCamelCase_, 'a' )
return [probe_file]
if self._spark.conf.get('spark.master', '' ).startswith('local' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowerCamelCase__ : Tuple = (
self._spark.sparkContext.parallelize(range(1 ), 1 ).mapPartitions(lowerCamelCase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' )
def a__ (self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(lowerCamelCase_ ):
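            # Emit one single-column batch holding each incoming Arrow batch's byte size, for the size estimate below.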
for batch in it:
yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} )
lowerCamelCase__ : List[Any] = self.df.count()
lowerCamelCase__ : List[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowerCamelCase__ : List[Any] = (
self.df.limit(lowerCamelCase_ )
.repartition(1 )
.mapInArrow(lowerCamelCase_, 'batch_bytes: long' )
.agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowerCamelCase__ : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowerCamelCase__ : str = min(lowerCamelCase_, int(approx_total_size / max_shard_size ) )
lowerCamelCase__ : List[Any] = self.df.repartition(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
'''simple docstring'''
import pyspark
lowerCamelCase__ : List[str] = ParquetWriter if file_format == 'parquet' else ArrowWriter
lowerCamelCase__ : List[str] = os.path.join(self._working_dir, os.path.basename(lowerCamelCase_ ) ) if self._working_dir else fpath
lowerCamelCase__ : Optional[int] = file_format == 'parquet'
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowerCamelCase__ : int = self.config.features
lowerCamelCase__ : Dict = self._writer_batch_size
lowerCamelCase__ : Optional[Any] = self._fs.storage_options
def write_arrow(lowerCamelCase_ ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowerCamelCase__ : Any = pyspark.TaskContext().taskAttemptId()
lowerCamelCase__ : str = next(lowerCamelCase_, lowerCamelCase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes'], )
lowerCamelCase__ : Tuple = 0
lowerCamelCase__ : Any = writer_class(
features=lowerCamelCase_, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(lowerCamelCase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
shard_id += 1
lowerCamelCase__ : Dict = writer_class(
features=writer._features, path=working_fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), writer_batch_size=lowerCamelCase_, storage_options=lowerCamelCase_, embed_local_files=lowerCamelCase_, )
lowerCamelCase__ : Tuple = pa.Table.from_batches([batch] )
writer.write_table(lowerCamelCase_ )
if writer._num_bytes > 0:
lowerCamelCase__ , lowerCamelCase__ : Tuple = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes'], )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[int] = os.path.join(os.path.dirname(lowerCamelCase_ ), os.path.basename(lowerCamelCase_ ) )
shutil.move(lowerCamelCase_, lowerCamelCase_ )
lowerCamelCase__ : List[str] = (
self.df.mapInArrow(lowerCamelCase_, 'task_id: long, num_examples: long, num_bytes: long' )
.groupBy('task_id' )
.agg(
pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ), pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ), pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ), pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ), )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def a__ (self, lowerCamelCase_, lowerCamelCase_ = "arrow", lowerCamelCase_ = None, lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
self._validate_cache_dir()
lowerCamelCase__ : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase_ )
lowerCamelCase__ : str = not is_remote_filesystem(self._fs )
lowerCamelCase__ : Any = os.path.join if is_local else posixpath.join
lowerCamelCase__ : Any = '-TTTTT-SSSSS-of-NNNNN'
lowerCamelCase__ : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowerCamelCase__ : Union[str, Any] = path_join(self._output_dir, lowerCamelCase_ )
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Dict = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[str] = []
for task_id, content in self._prepare_split_single(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase_ )
lowerCamelCase__ : str = total_num_examples
lowerCamelCase__ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowerCamelCase__ : Union[str, Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase__ : Optional[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, ):
rename(
lowerCamelCase_, fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace('TTTTT-SSSSS', f'''{global_shard_id:05d}''' ).replace('NNNNN', f'''{total_shards:05d}''' ), )
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : List[str] = 0
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ , lowerCamelCase__ : Any = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase_, len(lowerCamelCase_ ) ).map(lambda lowerCamelCase_ : _rename_shard(*lowerCamelCase_ ) ).collect()
else:
# don't use any pattern
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('SSSSS', f'''{shard_id:05d}''' ).replace('TTTTT', f'''{task_id:05d}''' ), fpath.replace(lowerCamelCase_, '' ), )
def a__ (self, lowerCamelCase_, ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
| 316
| 0
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
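    # Net present value: sum of cash_flow / (1 + discount_rate)**i over periods i = 0, 1, ..., n.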
lowerCamelCase__ : str = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_lowerCamelCase ) )
return round(_lowerCamelCase , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = len(lowerCamelCase_ )
lowerCamelCase__ : Any = [0] * len_array
if len_array > 0:
lowerCamelCase__ : Union[str, Any] = array[0]
for i in range(1, lowerCamelCase_ ):
lowerCamelCase__ : Optional[int] = self.prefix_sum[i - 1] + array[i]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
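        # Range sum in O(1): prefix_sum[end] minus the prefix ending just before start.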
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = {0}
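        # A contiguous slice sums to target_sum iff some prefix sum minus target_sum equals an earlier prefix sum; {0} stands for the empty prefix.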
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowerCamelCase_ )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316
| 0
|
"""simple docstring"""
from math import isqrt, loga
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[Any] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Any = False
return [i for i in range(2 , _lowerCamelCase ) if is_prime[i]]
def lowerCamelCase_ ( _lowerCamelCase = 80_0800 , _lowerCamelCase = 80_0800 ):
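    # Two-pointer count over primes p < q of pairs with q*log2(p) + p*log2(q) <= degree*log2(base), i.e. p**q * q**p <= base**degree.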
lowerCamelCase__ : Optional[Any] = degree * loga(_lowerCamelCase )
lowerCamelCase__ : Optional[int] = int(_lowerCamelCase )
lowerCamelCase__ : List[Any] = calculate_prime_numbers(_lowerCamelCase )
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : List[Any] = 0
lowerCamelCase__ : Dict = len(_lowerCamelCase ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f"{solution() = }")
| 354
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Dict = ['image_processor', 'tokenizer']
lowerCamelCase__ : Optional[int] = 'CLIPImageProcessor'
lowerCamelCase__ : List[str] = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__(self, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.', lowerCamelCase_, )
lowerCamelCase__ : int = kwargs.pop('feature_extractor' )
lowerCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __call__(self, lowerCamelCase_=None, lowerCamelCase_=None, lowerCamelCase_=None, **lowerCamelCase_ ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCamelCase__ : Any = self.tokenizer(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if images is not None:
lowerCamelCase__ : List[Any] = self.image_processor(lowerCamelCase_, return_tensors=lowerCamelCase_, **lowerCamelCase_ )
if text is not None and images is not None:
lowerCamelCase__ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase_ ), tensor_type=lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase_, **lowerCamelCase_ )
def a__ (self, *lowerCamelCase_, **lowerCamelCase_ ):
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase_, **lowerCamelCase_ )
@property
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = self.tokenizer.model_input_names
lowerCamelCase__ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 316
| 0
|
"""simple docstring"""
import numpy as np
def lowerCamelCase_ ( _lowerCamelCase ):
return 1 / (1 + np.exp(-vector ))
def lowerCamelCase_ ( _lowerCamelCase ):
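    # Sigmoid approximation of GELU: gelu(x) ~= x * sigmoid(1.702 * x).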
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355
|
"""simple docstring"""
import cva
import numpy as np
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if k in (0.04, 0.06):
lowerCamelCase__ : Tuple = k
lowerCamelCase__ : Optional[Any] = window_size
else:
raise ValueError('invalid k value' )
def __str__(self ):
'''simple docstring'''
return str(self.k )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = cva.imread(lowerCamelCase_, 0 )
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = img.shape
lowerCamelCase__ : list[list[int]] = []
lowerCamelCase__ : Optional[Any] = img.copy()
lowerCamelCase__ : Optional[Any] = cva.cvtColor(lowerCamelCase_, cva.COLOR_GRAY2RGB )
lowerCamelCase__ , lowerCamelCase__ : Any = np.gradient(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = dx**2
lowerCamelCase__ : List[Any] = dy**2
lowerCamelCase__ : List[str] = dx * dy
lowerCamelCase__ : Tuple = 0.04
lowerCamelCase__ : List[Any] = self.window_size // 2
for y in range(lowerCamelCase_, h - offset ):
for x in range(lowerCamelCase_, w - offset ):
lowerCamelCase__ : Union[str, Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : Optional[Any] = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowerCamelCase__ : str = (wxx * wyy) - (wxy**2)
lowerCamelCase__ : Dict = wxx + wyy
lowerCamelCase__ : Union[str, Any] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0), 0 )
color_img.itemset((y, x, 1), 0 )
color_img.itemset((y, x, 2), 2_5_5 )
return color_img, corner_list
if __name__ == "__main__":
A_ : Optional[Any] = HarrisCorner(0.04, 3)
A_, A_ : List[Any] = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 316
| 0
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 356
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
A_ : str = TypeVar("KEY")
A_ : List[Any] = TypeVar("VAL")
@dataclass(frozen=snake_case_ , slots=snake_case_ )
class a_ ( Generic[KEY, VAL] ):
'''simple docstring'''
lowerCamelCase__ : KEY
lowerCamelCase__ : VAL
class a_ ( _Item ):
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
super().__init__(lowerCamelCase_, lowerCamelCase_ )
def __bool__(self ):
'''simple docstring'''
return False
A_ : List[Any] = _DeletedItem()
class a_ ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
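    # Open-addressing hash map: linear probing via _get_next_ind, tombstones via _deleted, resizing on load-factor bounds.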
def __init__(self, lowerCamelCase_ = 8, lowerCamelCase_ = 0.75 ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = initial_block_size
lowerCamelCase__ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
lowerCamelCase__ : List[Any] = capacity_factor
lowerCamelCase__ : Optional[int] = 0
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return hash(lowerCamelCase_ ) % len(self._buckets )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self._buckets[ind]
if not stored:
lowerCamelCase__ : Tuple = _Item(lowerCamelCase_, lowerCamelCase_ )
self._len += 1
return True
elif stored.key == key:
lowerCamelCase__ : Optional[int] = _Item(lowerCamelCase_, lowerCamelCase_ )
return True
else:
return False
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
lowerCamelCase__ : Any = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Any = self._buckets
lowerCamelCase__ : Dict = [None] * new_size
lowerCamelCase__ : Tuple = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def a__ (self ):
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = self._get_bucket_index(lowerCamelCase_ )
for _ in range(len(self._buckets ) ):
yield ind
lowerCamelCase__ : Tuple = self._get_next_ind(lowerCamelCase_ )
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
if self._try_set(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
break
def __setitem__(self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCamelCase_, lowerCamelCase_ )
def __delitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = self._buckets[ind]
if item is None:
raise KeyError(lowerCamelCase_ )
if item is _deleted:
continue
if item.key == key:
lowerCamelCase__ : Optional[int] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, lowerCamelCase_ ):
'''simple docstring'''
for ind in self._iterate_buckets(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCamelCase_ )
def __len__(self ):
'''simple docstring'''
return self._len
def __iter__(self ):
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__(self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = ' ,'.join(
f'''{item.key}: {item.val}''' for item in self._buckets if item )
return f'''HashMap({val_string})'''
| 316
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
A_ : int = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : int = ["CLIPFeatureExtractor"]
A_ : Any = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 357
|
"""simple docstring"""
def lowerCamelCase_ ( ):
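    # Build the Champernowne digit string "123456789101112..." and multiply the digits at positions 1, 10, 100, ..., 1_000_000.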
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : List[Any] = 1
while len(_lowerCamelCase ) < 1e6:
constant.append(str(_lowerCamelCase ) )
i += 1
lowerCamelCase__ : str = ''.join(_lowerCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 316
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : str = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = 'convbert'
def __init__(self, lowerCamelCase_=3_0_5_2_2, lowerCamelCase_=7_6_8, lowerCamelCase_=1_2, lowerCamelCase_=1_2, lowerCamelCase_=3_0_7_2, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=1e-12, lowerCamelCase_=1, lowerCamelCase_=0, lowerCamelCase_=2, lowerCamelCase_=7_6_8, lowerCamelCase_=2, lowerCamelCase_=9, lowerCamelCase_=1, lowerCamelCase_=None, **lowerCamelCase_, ):
'''simple docstring'''
super().__init__(
pad_token_id=lowerCamelCase_, bos_token_id=lowerCamelCase_, eos_token_id=lowerCamelCase_, **lowerCamelCase_, )
lowerCamelCase__ : Optional[int] = vocab_size
lowerCamelCase__ : List[str] = hidden_size
lowerCamelCase__ : List[str] = num_hidden_layers
lowerCamelCase__ : List[Any] = num_attention_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : List[str] = type_vocab_size
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Any = embedding_size
lowerCamelCase__ : int = head_ratio
lowerCamelCase__ : List[Any] = conv_kernel_size
lowerCamelCase__ : Optional[Any] = num_groups
lowerCamelCase__ : List[Any] = classifier_dropout
class a_ ( snake_case_ ):
'''simple docstring'''
@property
def a__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCamelCase__ : Dict = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 358
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A_ : Union[str, Any] = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
A_ : int = concatenate_datasets
A_ : Any = DownloadConfig
A_ : List[Any] = DownloadManager
A_ : Optional[Any] = DownloadMode
A_ : List[str] = DownloadConfig
A_ : Optional[int] = DownloadMode
A_ : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 316
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 359
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
A_ : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : bool = field(default=snake_case_ , metadata={'help': 'Whether to use SortishSampler or not.'} )
lowerCamelCase__ : bool = field(
default=snake_case_ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `max_length` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[int] = field(
default=snake_case_ , metadata={
'help': (
'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
'to the `num_beams` value of the model configuration.'
)
} , )
lowerCamelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=snake_case_ , metadata={
'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
} , )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_, lowerCamelCase_ ):
lowerCamelCase__ : Any = v.to_dict()
return d
| 316
| 0
|
from collections import namedtuple
import requests
from lxml import html # type: ignore
A_ : int = namedtuple("covid_data", "cases deaths recovered")
def lowerCamelCase_ ( _lowerCamelCase = "https://www.worldometers.info/coronavirus/" ):
lowerCamelCase__ : Any = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(_lowerCamelCase ).content ).xpath(_lowerCamelCase ) )
A_ : Dict = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 360
|
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : List[str] = []
lowerCamelCase__ : Tuple = {
'^': 3,
'*': 2,
'/': 2,
'%': 2,
'+': 1,
'-': 1,
} # Priority of each operator
lowerCamelCase__ : List[str] = len(_lowerCamelCase ) if (len(_lowerCamelCase ) > 7) else 7
# Print table header for output
print(
'Symbol'.center(8 ) , 'Stack'.center(_lowerCamelCase ) , 'Postfix'.center(_lowerCamelCase ) , sep=' | ' , )
print('-' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(_lowerCamelCase ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(_lowerCamelCase ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(_lowerCamelCase ) == 0:
stack.append(_lowerCamelCase ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(_lowerCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(_lowerCamelCase ) # push x to stack
print(
x.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
while len(_lowerCamelCase ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
' '.center(8 ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , (''.join(_lowerCamelCase )).ljust(_lowerCamelCase ) , sep=' | ' , ) # Output in tabular format
return "".join(_lowerCamelCase ) # return Postfix as str
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = list(infix[::-1] ) # reverse the infix equation
for i in range(len(_lowerCamelCase ) ):
if infix[i] == "(":
lowerCamelCase__ : List[Any] = ')' # change "(" to ")"
elif infix[i] == ")":
lowerCamelCase__ : Tuple = '(' # change ")" to "("
return (infix_2_postfix(''.join(_lowerCamelCase ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
A_ : Tuple = input("\nEnter an Infix Equation = ") # Input an Infix equation
A_ : List[str] = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 316
| 0
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=1_3, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=3_2, lowerCamelCase_=5, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=4, ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = parent
lowerCamelCase__ : Optional[Any] = batch_size
lowerCamelCase__ : int = seq_length
lowerCamelCase__ : Any = is_training
lowerCamelCase__ : Dict = use_attention_mask
lowerCamelCase__ : List[str] = use_token_type_ids
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Optional[Any] = hidden_size
lowerCamelCase__ : Optional[int] = num_hidden_layers
lowerCamelCase__ : Tuple = num_attention_heads
lowerCamelCase__ : List[Any] = intermediate_size
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : List[str] = attention_probs_dropout_prob
lowerCamelCase__ : str = max_position_embeddings
lowerCamelCase__ : Any = type_vocab_size
lowerCamelCase__ : List[str] = type_sequence_label_size
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : List[str] = num_choices
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowerCamelCase__ : Any = None
if self.use_attention_mask:
lowerCamelCase__ : str = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : List[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowerCamelCase__ : str = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCamelCase_, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = config_and_inputs
lowerCamelCase__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.prepare_config_and_inputs()
        lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = config_and_inputs
lowerCamelCase__ : Optional[Any] = True
lowerCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : List[str] = True
lowerCamelCase__ : List[Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def a__ (self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCamelCase__ : Dict = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=lowerCamelCase_ )
lowerCamelCase__ : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase_ )
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=lowerCamelCase_ )
lowerCamelCase__ : int = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]], dtype=jnp.intaa )
lowerCamelCase__ : List[Any] = model(lowerCamelCase_ )[0]
lowerCamelCase__ : Any = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ), lowerCamelCase_ )
# compare the actual values for a slice.
lowerCamelCase__ : str = np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]], dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3], lowerCamelCase_, atol=1e-4 ) )
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=lowerCamelCase_ )
lowerCamelCase__ : List[Any] = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]], dtype=jnp.intaa )
lowerCamelCase__ : Tuple = model(lowerCamelCase_ )[0]
# compare the actual values for a slice.
lowerCamelCase__ : Union[str, Any] = np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]], dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3], lowerCamelCase_, atol=1e-4 ) )
| 361
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we called the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
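
        # Added note (hedged, not part of the original file): the two tests above rely
        # on AutoConfig's version-gated resolution -- among `config.X.Y.Z.json` files it
        # picks the highest version gate that is still <= the installed `transformers`
        # version, and falls back to the plain `config.json` when every gate is above it.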
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image = Image.open(dataset[0]['file'])
    segmentation_map = Image.open(dataset[1]['file'])
    return image, segmentation_map


def prepare_semantic_batch_inputs():
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
    image1 = Image.open(ds[0]['file'])
    map1 = Image.open(ds[1]['file'])
    image2 = Image.open(ds[2]['file'])
    map2 = Image.open(ds[3]['file'])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def a__ (self):
        '''simple docstring'''
        self.image_processor_tester = BeitImageProcessingTester(self)
@property
def a__ (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def a__ (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
def a__ (self ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
def a__ (self ):
'''simple docstring'''
pass
def a__ (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def a__ (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def a__ (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def a__ (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(
encoding['labels'].shape, (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(encoding['labels'].dtype, torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(
encoding['labels'].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(encoding['labels'].dtype, torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(
encoding['labels'].shape, (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(encoding['labels'].dtype, torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors='pt')
self.assertEqual(
encoding['pixel_values'].shape, (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(
encoding['labels'].shape, (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
self.assertEqual(encoding['labels'].dtype, torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
def a__ (self ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='pt')
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 1_5_0 )
        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors='pt')
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 2_5_5 )
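        # Added note (hedged): `do_reduce_labels` maps the ADE20k background class 0 to
        # the ignore index 255 and shifts the remaining labels down by one, which is why
        # the upper bound on the labels grows from 150 to 255 once it is enabled.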
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum):
    # Prune: either the current path already overshoots max_sum, or even adding
    # every remaining number could not reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index])


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
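
# Added illustration (hedged): for nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the
# backtracking search above finds the subsets [3, 4, 2] and [4, 5], so the script
# prints: [3, 4, 2] [4, 5]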
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]
    i2 = i3 = i5 = 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"{ugly_numbers(2_00) = }")
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


def build_tree():
    print('\n********Press N to stop entering at any point of time********\n')
    check = input('Enter the value of the root node: ').strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'Enter the left node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == 'n':
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'Enter the right node of {node_found.data}: '
        check = input(msg).strip().lower() or 'n'
        if check == 'n':
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node):
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',')
    pre_order(node.left)
    pre_order(node.right)


def in_order(node):
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=',')
    in_order(node.right)


def post_order(node):
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=',')


def level_order(node):
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',')
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node):
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',')
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node):
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',')
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have a left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node):
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=',')
        n = n.right


def post_order_iter(node):
    if not isinstance(node, TreeNode) or not node:
        return
    stack1: list[TreeNode] = []
    stack2: list[TreeNode] = []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # popping from stack2 yields the post order
        print(stack2.pop().data, end=',')


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f'{left * char} {s} {(left + extra) * char}'


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node: TreeNode = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
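

# Added illustration: a minimal non-interactive sketch of the traversals above on a
# hand-built tree (1 with children 2 and 3; 2 with children 4 and 5). `TreeNode` and
# the traversal functions are the ones defined in this file; `_demo_traversals` is a
# name introduced here for the example and is never called automatically.
def _demo_traversals() -> None:
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    root.left.right = TreeNode(5)
    pre_order(root)  # prints: 1,2,4,5,3,
    print()
    in_order(root)  # prints: 4,2,5,1,3,
    print()
    post_order(root)  # prints: 4,5,2,3,1,
    print()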
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
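

# Added usage sketch (hedged): instantiate the config with its defaults and read
# `hidden_size` through the `attribute_map` alias declared on the class. The class
# name is reconstructed from the published GPTSAN-japanese configuration.
# config = GPTSanJapaneseConfig()
# assert config.hidden_size == config.d_model == 1024
# assert config.num_layers == config.num_switch_layers + config.num_ext_layers == 10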
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.15},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r'@@$', '', k), v) if k.endswith('@@') else (re.sub(r'$', '</w>', k), v) for k, v in d.items())
    keep_keys = '<s> <pad> </s> <unk>'.split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f'''{k}</w>''']
        d2[k] = d[k]  # restore
    return d2
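

# Added illustration of the rewrite above (hedged): given a fairseq vocab that also
# contains the four special tokens, {'le@@': 5, 'tt@@': 6, 'er': 7, ...} becomes
# {'le': 5, 'tt': 6, 'er</w>': 7, ...} -- '@@' continuation markers are stripped,
# word-final pieces gain '</w>', and '<s> <pad> </s> <unk>' are restored unchanged
# (the deletion step assumes all four specials are present in the input dict).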
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'''Writing results to {pytorch_dump_folder_path}''')

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
    data_name_or_path = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'''using checkpoint {checkpoint_file}''' )
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs)
    args = vars(chkpt['args']['model'])

    src_lang = args['source_lang']
    tgt_lang = args['target_lang']

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f'''dict.{src_lang}.txt''')
    tgt_dict_file = os.path.join(fsmt_folder_path, f'''dict.{tgt_lang}.txt''')

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-src.json')
    print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''')
    with open(src_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, 'vocab-tgt.json')
    print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''')
    with open(tgt_vocab_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
# merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES['merges_file'])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding='utf-8') as fin:
        merges = fin.read()
    merges = re.sub(r' \d+$', '', merges, 0, re.M)  # remove frequency number
    print(f'''Generating {merges_file}''')
    with open(merges_file, 'w', encoding='utf-8') as fout:
        fout.write(merges)
# model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, 'config.json')
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support bpe={args['tokenizer']}'''
lowerCamelCase__ : Optional[int] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
    model_conf['num_beams'] = 5
    model_conf['early_stopping'] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf['length_penalty'] = best_score_hparams[model_dir]['length_penalty']
    else:
        model_conf['length_penalty'] = 1.0
    print(f'''Generating {fsmt_model_config_file}''')
    with open(fsmt_model_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
# tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        'langs': [src_lang, tgt_lang],
        'model_max_length': 1024,
        'do_lower_case': do_lower_case,
    }
    print(f'''Generating {fsmt_tokenizer_config_file}''')
    with open(fsmt_tokenizer_config_file, 'w', encoding='utf-8') as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
# model
    model = chkpt['models'][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items())
    # remove unneeded keys
    ignore_keys = [
        'model.model',
        'model.encoder.version',
        'model.decoder.version',
        'model.encoder_embed_tokens.weight',
        'model.decoder_embed_tokens.weight',
        'model.encoder.embed_positions._float_tensor',
        'model.decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'''Generating {pytorch_weights_dump_path}''')
    torch.save(model_state_dict, pytorch_weights_dump_path)
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'''cd {data_root}''' )
print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ : Dict = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
values = {
0: "0",
1: "1",
2: "2",
3: "3",
4: "4",
5: "5",
6: "6",
7: "7",
8: "8",
9: "9",
10: "a",
11: "b",
12: "c",
13: "d",
14: "e",
15: "f",
}
def decimal_to_hexadecimal(decimal):
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '0x' + hexadecimal
    if negative:
        hexadecimal = '-' + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
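    # Added spot-checks (illustrative; `decimal_to_hexadecimal` is the descriptive
    # name given to the previously unnamed function above): 255 -> '0xff' and
    # -256 -> '-0x100'. Edge case: an input of 0 skips the while loop entirely
    # and yields the bare prefix '0x'.
    assert decimal_to_hexadecimal(255) == "0xff"
    assert decimal_to_hexadecimal(-256) == "-0x100"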
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
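

# Hypothetical subclass sketch (added for illustration; `EnvCommand` and its parser
# wiring are invented here, not part of the original file):
# class EnvCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         env_parser = parser.add_parser('env')
#         env_parser.set_defaults(func=lambda args: EnvCommand())
#
#     def run(self):
#         print('gather and print environment info here')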
"""simple docstring"""
def binomial_coefficient(n, k):
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count):
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n):
    if n < 0:
        raise ValueError('factorial() not defined for negative values')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count):
    return catalan_number(node_count) * factorial(node_count)
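

# Added worked check: for node_count = 5, binomial_coefficient(10, 5) = 252,
# catalan_number(5) = 252 // 6 = 42 distinct BST shapes, and
# binary_tree_count(5) = 42 * 5! = 42 * 120 = 5040 labeled binary trees.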
if __name__ == "__main__":
A_ : List[Any] = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
f"binary trees and {catalan_number(node_count)} binary search trees."
)
"""simple docstring"""
import re
def dna_complement(dna: str) -> str:
    if len(re.findall('[ATCG]', dna)) != len(dna):
        raise ValueError('Invalid Strand')
    return dna.translate(dna.maketrans('ATCG', 'TAGC'))
if __name__ == "__main__":
import doctest
doctest.testmod()
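    # Added spot-checks (illustrative; `dna_complement` is the descriptive name
    # given to the previously unnamed function above):
    assert dna_complement("ATCG") == "TAGC"
    assert dna_complement("GGTA") == "CCAT"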